Archive: python-opentelemetry-1.39.1 (commit b4c13279187941b561de5d62e6483a91759ba123)

==> python-opentelemetry-1.39.1/.codespellrc <==
[codespell]
# skipping auto generated folders
skip = ./.tox,./.mypy_cache,./docs/_build,./target,*/LICENSE,./venv,.git,./opentelemetry-semantic-conventions,*-requirements*.txt
ignore-words-list = ans,ue,ot,hist,ro

==> python-opentelemetry-1.39.1/.coveragerc <==
[run]
omit =
    */tests/*
    */gen/*

==> python-opentelemetry-1.39.1/.gitattributes <==
# tells github that proto code is generated
opentelemetry-proto/src/**/*_pb2*.py* linguist-generated=true

==> python-opentelemetry-1.39.1/.github/CODEOWNERS <==
# Code owners file.
# This file controls who is tagged for review for any given pull request.

# For anything not explicitly taken by someone else:
* @open-telemetry/python-approvers

==> python-opentelemetry-1.39.1/.github/ISSUE_TEMPLATE/bug_report.yaml <==
---
name: Bug Report
description: Create a report to help us improve
labels: [bug]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this bug report! Please make sure to fill
        out the entire form below, providing as much context as you can in order to help
        us triage and track down your bug as quickly as possible.

        Before filing a bug, please be sure you have searched through
        [existing bugs](https://github.com/open-telemetry/opentelemetry-python/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3Abug)
        to see if your bug is already addressed.

        If your bug is related to an instrumentation or plugin in
        [opentelemetry-python-contrib](https://github.com/open-telemetry/opentelemetry-python-contrib),
        please be sure to file it there.
  - type: textarea
    id: environment
    attributes:
      label: Describe your environment
      description: |
        Please describe any aspect of your environment relevant to the problem, including
        your Python version, [platform](https://docs.python.org/3/library/platform.html),
        version numbers of installed dependencies, information about your cloud hosting
        provider, etc. If you're reporting a problem with a specific version of a library
        in this repo, please check whether the problem has been fixed on main.
      value: |
        OS: (e.g., Ubuntu)
        Python version: (e.g., Python 3.9.10)
        SDK version: (e.g., 1.25.0)
        API version: (e.g., 1.25.0)
  - type: textarea
    attributes:
      label: What happened?
      description: Please provide as much detail as you reasonably can.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Steps to Reproduce
      description: >
        Provide a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
        We will close the issue if the repro project you share with us is complex or we
        cannot reproduce the behavior you are reporting.
        We cannot investigate custom projects, so don't point us to such, please.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Expected Result
      description: What did you expect to see?
    validations:
      required: true
  - type: textarea
    attributes:
      label: Actual Result
      description: What did you see instead?
    validations:
      required: true
  - type: textarea
    id: additional-context
    attributes:
      label: Additional context
      description: Add any other context about the problem here.
      placeholder: Any additional information...
  - type: dropdown
    id: contribute
    attributes:
      label: Would you like to implement a fix?
      description: For guidance on how to get started, refer to the [contribution guide](https://github.com/open-telemetry/opentelemetry-python/blob/main/CONTRIBUTING.md).
      options:
        - "No"
        - "Yes"
  - type: dropdown
    attributes:
      label: Tip
      description: This element is static, used to render a helpful sub-heading for end-users and community members to help prioritize issues. Please leave as is.
      options:
        - "[React](https://github.blog/news-insights/product-news/add-reactions-to-pull-requests-issues-and-comments/) with 👍 to help prioritize this issue. Please use comments to provide useful context, avoiding `+1` or `me too`, to help us triage it. Learn more [here](https://opentelemetry.io/community/end-user/issue-participation/)."
      default: 0

==> python-opentelemetry-1.39.1/.github/ISSUE_TEMPLATE/config.yml <==
contact_links:
  - name: Slack
    url: https://cloud-native.slack.com/archives/C01PD4HUVBL
    about: Or the `#otel-python` channel in the CNCF Slack instance.

==> python-opentelemetry-1.39.1/.github/ISSUE_TEMPLATE/feature_request.yaml <==
---
name: Feature Request
description: Suggest an idea for this project
labels: [feature-request]
body:
  - type: markdown
    attributes:
      value: |
        Before opening a feature request against this repo, consider whether the feature
        should/could be implemented in the
        [other OpenTelemetry client libraries](https://github.com/open-telemetry/).
        If so, please
        [open an issue on opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification/issues/new)
        first.
  - type: textarea
    id: related-problem
    attributes:
      label: Is your feature request related to a problem?
      description: Is your feature request related to a problem? If so, provide a concise description of the problem.
      placeholder: Include the Issue ID from this or other repos.
    validations:
      required: true
  - type: textarea
    id: solution
    attributes:
      label: Describe the solution you'd like
      description: What do you want to happen instead? What is the expected behavior?
      placeholder: I'd like to ...
    validations:
      required: true
  - type: textarea
    id: alternatives
    attributes:
      label: Describe alternatives you've considered
      description: Which alternative solutions or features have you considered?
      placeholder: Some potential solutions
    validations:
      required: false
  - type: textarea
    id: additional-context
    attributes:
      label: Additional Context
      description: Add any other context about the feature request here.
      placeholder: Some related requests in other projects or upstream spec proposals.
    validations:
      required: false
  - type: dropdown
    id: contribute
    attributes:
      label: Would you like to implement a fix?
      description: |
        For guidance on how to get started, refer to the
        [contribution guide](https://github.com/open-telemetry/opentelemetry-python/blob/main/CONTRIBUTING.md).
      options:
        - "No"
        - "Yes"
  - type: dropdown
    attributes:
      label: Tip
      description: This element is static, used to render a helpful sub-heading for end-users and community members to help prioritize issues. Please leave as is.
      options:
        - "[React](https://github.blog/news-insights/product-news/add-reactions-to-pull-requests-issues-and-comments/) with 👍 to help prioritize this issue. Please use comments to provide useful context, avoiding `+1` or `me too`, to help us triage it. Learn more [here](https://opentelemetry.io/community/end-user/issue-participation/)."
      default: 0

==> python-opentelemetry-1.39.1/.github/dependabot.yml <==
# Keep GitHub Actions up to date with GitHub's Dependabot...
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot
# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
version: 2
updates:
  - package-ecosystem: github-actions
    directory: /
    groups:
      github-actions:
        patterns:
          - "*"  # Group all Actions updates into a single larger pull request
    schedule:
      interval: weekly

==> python-opentelemetry-1.39.1/.github/pull_request_template.md <==
# Description

Fixes # (issue)

## Type of change

Please delete options that are not relevant.

- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update

# How Has This Been Tested?

Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration.

- [ ] Test A

# Does This PR Require a Contrib Repo Change?

- [ ] Yes.
  - Link to PR:
- [ ] No.

# Checklist:

- [ ] Followed the style guidelines of this project
- [ ] Changelogs have been updated
- [ ] Unit tests have been added
- [ ] Documentation has been updated

==> python-opentelemetry-1.39.1/.github/rtd-build-instructions.png <==
[Binary PNG image (Read the Docs build instructions screenshot); contents not representable as text.]
wbP8_(p/Rt =_2C3;:EhS1 yͱ[(+Rs0NpK)BMS+Hui8nj|_v@\=ɒX(|޴*%4V(Ls٬NZaPq}MLguړK!Kd!D_ P ]B~n]4e d@AYt;AXLPTU+szRC4/e)p&Yd꽔lJ1ՙd}s(8S:zj4u#,tR-[ܚG^K0:J2Hbkb^ ƿ )|J=ۈ9OM ~Qw]]tNmd4pifyo#-BSX KOj/-LZwRt\r=%g.sq+,Zpp+k#'.`\/u)\'G63bK7'{'7ֿޘvHRǼGW l'fozn%7{Ҩ^[sKkKl ~ѱ&GSSCNuvvvuv.B}?U)͡^W+1鉭yP+ڿgZ_*-^=ks[ !E3"+Mg*4îfj?Ϲ/ťt8?99~pWWo{GW}833;4|텶BR rE5IR"c|u)W+kM&k1n: b Iij"΁j_JqzYC&duȂ]0V&&h$ &p/tp/)ee'fwݍ7 j.4)6F8!R ]B[kKsKZ`k#Q<:~00xU -"+O Bٺ\(f”0Q ~WN|e]0Wx_ 8i$lAҧgMcX[炧`.h^QtS>yyc鿅BΡBsa~f)38Pn% \A BU_djaZ҆.xXSԏ8mrQdSk:N&7yZUn!Fϩ.F7 rc9\wsӖN+U[Dڥ_ѷ0a7 lݲ@yAH[ .:HdFzS4'q~JNJu}amN:3ݯtZ";N,࣪>o5 Wap2.f4a^0n#zqX%>D47J`nSS^$| &$\Ge<'3bm| ~Ղ*ü Mf/l+4L65|lY9`B(to5We%:jfXħ0Tv詩c[W0E JZS@̎=iFOS]/Uʦ/qٮK3g  =y06v˖u&XV;g/#ۖ#W3`UP(xLyKgzsan,7.7< \w0?˅ sdɲQn| ;˾9?\~6+,"J˿VN|,n莛HXMNĔMvjءC;mZ2 W_f4(9[Yh3-2Ph.7X#G` MMBX0 IrY Che, Sf҈߮fd~1m yٌ"GSנOvPkMxXH ÄƂ1mfIO\*+ Liphz0&f 9 -j(^kwfyNVƅ񱶎־~009>0t!\i *%>'ljo4)*T AAW]Cp2,D%!68eu  Xgv/ wIbjmcU^3ӳeawVxF  M]VlkEt$*n:ch:pfz).{w1ZZPCr:QN]z;SU j^$>*qȘ΢ )+S^ל(X1,=V-Vjb[CغPZ盳"S0)8SMSkpfN@I:Y0Q9\d;.X {{z莗UdhվyZʦ6rYs>oQ ﺸ|tuH%rL-j]$ _؉/`6Ny3.#y ՘{m^+=Rb_mMÆsօX ]=-lG 4mMNO_Kk=-k B5Y?1\.BgzkqBJL]9;Lo:%7/Xl.4ZP !a'(]!,̹xll$|"1Q&]ae`]XEcP c] )<']~ה)` 7LP,G@. ROe8eI+[P]؀X-Z%Rf^+%gc5\{q&:[:ow8|Mo#ӣ\|˻ڊEl dS_F\Pʀh>w:;:wnwYXYA"&Cc\rtuu SIv:Z#%@gꬲef鵳m{Mmfǜ3Գ.mNsH[?yoDyZE ޱu``Ϟ=rBv]{\40c_kHPL"g5ttmv;[Q=1?T_㸭 wvt^|]:۲1"*ё"BEoz"Vt͢9*oR<u_0[1_YU) )LJ,2?FBc\}&PaCOQn#]Mˢ^olbַE8؅ƙٹ \+Ih^ M\2)Q $ظ%ŕ[P33QdXlF^v9d=ͅV4tT 05^,V )o Pe!TiC5Y[^ORVOo*/|0{*wA$^OnUzK9+6L AcOe,T@V{nDIV?qqrz_:VƼVUG/?pD)7<4;9"U]Yk}Ao0< YqZXbcQ71O|›dN'Aw9x ymi$𓣏d TsaE΃8<:-W`"UNW58Rm~dQb,) S68`1,*E\̊r jl2Ga\ 9N@pzR4 Dipf"/5q(ՃXb-Xk{qTQ[wvl!p׮߿ޭH]Z\=`nllt}]k8Zw[z[AaAlP\#t~ FN-p!{teM`w^xgxɈ^NoSԹH$~s+*:)Bj)r.bߴ_24Y:`|Me4&,― ,F2 9޸MJuuωS0$rK)<303W0bH@ (Y0_l*ITm @(]XM\ت%~>6މCs!KaK,cPhOK00пm ݄E[b**]T za(b8+8=[KϽ%(㤁Yi 8 =z v=Fbҳȿ˸X!ܦpyx<^&AEb/~veT%w~6{ӳym ~NOH,Y޻5՝Ux@)}$,MXRx\[-#Þ t3\2W^bP2E2ic`e7cIRFia'߸Hb ILih~J}y:֔9%e? 1Jms)iMfz Q|p&=1@GJIDyXpxho $3fT?-[L/~ޞ=o?6wvӜ Mz.ABZD1RpìJ\bC$`HW ^TiKGxgS.ucV*+m1h '+kn:::lߵ{׶ۺuycȇl?B`ڲ1)+%`]ݗ칤㜟@Q!ec!(ΒkE!{%$ |۬ @134;[m̚ hd9BD8! lF#QLr ! 
hآǠ#n$ޫk*}fft}>s=dfB0$eT.GP.Z I5}A03"|Ɉʀ(\@ziRk@8~[{/ˀ $ F,RNF2GMDJ8,'oa0DS)F˰EP~BIǕK?+'N̥$)sTE GtML(VQ N6{`_ޗ15ۭ&_>kIrM ;='O>C /KwbmqDFQ ya3R0gKÏ>0I?/dXCRxQYچjB;QD*mT ӗ{!9.]N@mFKt&w"LbNM9}Ǟ~͛lCzAQ$ךrKbi)>Tp6^\Z'_4K*qha}҅9cX@A(7LN,2/a|$kMpѶP!KƓяBo@W<箲XV%[sM]`LIL}cGgL7v q4wOʔ+sh)9V; z*5`O[O4>}ڕ; 8|cOM?}iȁ;%sQ^:08B{;g9r*:5zi@(jmK$ >äixЮxkb9^q {vϵ8iH>=w'nk,,,;vtqa0ݓ4=!(5CL]v usC]?U5`}@p /r}i7,X/8mA*hVRX;\5hTI*!;N H>~m1x 4ykZB#nDD@IDATc'7%MiY۳~+&)TZFBЦP2K2CkdT=av_mK焔 ilh^(#=Q(ρGT ߤMml➸P:bT!Қ KNm'@P2+(hBj VoA4$䥳J-y :'4sDPTG(H!e*BT<&$K&,QvP$TNg7X,Ø_`V#Jp$P$RYOPÇ^9wɤrTITG#6+ l8-+A0å*C8fL|&!?DRĤH=8~NWl;~dŠUj\N,)R!̢ VMKm miRe-R&d Y>j"Fmy/^<Į-x]Ū-(xi2Ku4%E|1\YomU5R#+3B@]Z'*6wbD6ihS"lfǀ1b,ZJ@Ї&ҴEFw1?s̪Oc "-eIib&%+u1\t8|sc]H&jݥ64scWBĴ`s#KUP;% n6BzG 8neuyss51%FU -=0϶R"lSG/aMDutJPGUxѯnF<2iw>c++:##0-'c8ͺtV&!qMqCb;r2JiioOuQC8AeqcjRN 7hht姩anK9m 32yaT[J:ȴ՝bi(/1hvS'47]in4RFl4|tɥ"b['HYqn+A3 ]$e[2C~ r'ͨ?!~WRNti#imY.DRd2 zA0hmg^<%--SaqgقYZ2QSW"%Kj/Ո0<qN R}0=NQ,~R"fZ"aXN w|iDU`fB^SND hX~42Aq-"HɻlNB/>e(D{g$,#2 MEg{̄k&~r@ K1z"%  7rNg'L.WdD$ R8Hs dDA$b۾EL,D;czq@ zAJE&Z;5%NYs.Mر#%i%JQ:VYjq,K-jd^(wE8QexTB 7*!ĽY>yim6Z,1EՎM6VjBĢFiQ8rd mY`+FcmM̋ /4OAuɚmdRɺkelYUm'=DsS´h gwBj Ԃe.6ݽ6ן9f Y˻ k=l!*0+T3d֎%v'om@m5Kէ=Lhމ!/,uUT*_-1SƆN6*ȶ ي{W5'H#оEs5kñQ1fzy~o;~88*;51mK'uf0CHdIGfrv< j0 ~:x/6q>brJx{nyD SJN')vv8gaj2n{ 7"@U|&]m$WuW'~tK\p;rdϱ?; կхڵt~8lƮdxe'Ko>H>}9}|ۛv?G~~;vXW]:gCo%5;a$Ǧ8 5Z;_pr@c%gije˧jMn5IJ9$(Fß{oXذ{ݳ/,۽fsmz`t۽?Os =|x>O>( ~r/H0^ˢxMLhN8fldl,f 6BHH5uD4D1 /]EŽ#)Sڃ6`7O3?ڱhi^͇2$1dΙK+@c$*-&GZ/ܴ/jѽӶ H}!A:HUy,M/EFBt:ڂEQ O3w[6Bsp*äM 6'cPa[ЄAkaeKAHr[g\Δ!%li1o!*D%m@U>u(E e2#PrY6B"I-_ΨJ& p Y(}724Jbe#AtHv[3=\{BoJ`B2_r?X%41nuwqDD_7PlP14N  pY‡ّ\Bɥd@K9<ǐRdלgRx_ħ4)9ӂAkRfXE84#/M I|AfC'JG݊fXd"f|ORk$7CDtg%#k!ȗ}$|& 쀕/%'ŀfF0YDکD"S۱51[DZmZZ)peiD'W)} ~^5'Rg_jͦNn2bϱOl'"8;MMbGewtJO˟}o릫\Q;ysJpsBE96Otl욈"s^f*cTp2M}I9*鍆VP-pwp2L$M?O_0V뛿vp O6m~ߟQ)ۼn~gM47ofףDNTpѯzgGf4{}>eݧt}xG?JSRl~}Do?9gW%O|eyVևkc67RQ6A/&+jf%Wys"Y _;p/LX>{6?[][/S?зMpWU=hطNW-+777) @E2"rv,Ć6%WvUm0s5GJdVk1;F4fl5*?t[".7#IS PAK5emr>'ST#ҞZ ԛR8mߨ5k%25BKKf4:%ŎMd%N\qEf{,~㓼[-VaMu蚂ol}cN*{ÈaML9YqY`\Ph WL ̱6&(4wj/ۺE/IMt[>7|M2>}#V7|*#Jcvut"kbi˘ֿN;I) ۑ8MNdGg -;Ɲad[jOs'3Хrjs[^>zv51BDD2q>o~+H\.d<>t% k.1u~k|܊ӵgC9oƿ- oM9om?zK\k~6_;u]8sK8wop45͛>w7o~D ?݋e܀qm)W~ԡ?w }[?}|R O:%yO^5[#:gg=]hC'Qky9_/ra]~b[q\:oj泦q=A*w8%O77e߼iHc8_|AM_K9K`xy˗w]yM#§6ܽX^QtöܲJMQmN)z0 əP1c!AkfV`k@ȠV9k'c0HȃL#bssWB"U֪BTڶʩ.KRɐXMJ_*YV?O<0:XnWW0JY NS~╆/f`m":`5% Bh i*IގIBD[_$5.]+A2ި::}iGgt. 
x)sE]LIgIKuf)[ c|`J]d{;?=Ȟ[r>N_~7Toy 8)M/B{r]oygʏm׾MET}𧾦y:Co3"}Ojv_zckY՜9B' LGh#>ċsi鳲o|.7 ߱/б}g Y!YB BEoڷ{HܒW삳(6/I=P]T7y_DYphdJy]ЄSrZ㱹UBDWi nJt X';qĬDQ1MoyKTcCdSJX *0ɌLɎ[_JKw\V9/4ݲh+Gx9+&HdtLTMψ(J0cٕ3JPE)҇ }geEDr+T z^SI*~[]U*0 Re% 0V%;]yLi&eiéQy>b/%,* )#yM|{c994,Jl9_v,3XPVRuT :::S eNDi&\ѩP<{Ru5)}[ށ|Ox>\IDp6;~ z WZbw1)'2w泿`,poo l1.Vz/6,.lK,//s>FB?/S,$|5шES^x~>?yuz~0xWFKӘ:๽뾸yhlԩ;`\Jvo9vQnbʥ+7u?g 0bf_ĻOqA!(dX e@G xଐf=q Oh"K߹p+>pA=߹ xC1@ YR $48!),m?#e-690I HLB>%v9O߽ NUII;xhkα\U62*D xW<$Je lQ'pig`ʲ}oqŇSy5՚֚XkbkX'>N)93Pk{q͖yt .R: sEq(cf[D:9\^9NT2g)#Gܼo4xEsӧ#{v7 \dw+w]o޽)4@a>+rK9\B܉sc6x{X''_wuΡkxLwH>ݷH|}'fMyt"zKz n)n=Ot4 SgB?4 sEG:)fsp}-?EGBw꾭;@4Ύojvػ/GSwmow"*Ԑolz:i]s/¯,E^xk'zM2{PD9; S 9ߺѐQJ@b )5=(GJRh*^C${&E`?a`QA;M*$!M؜@@@p/nX+P*9hɜd˜4 A@\;}1F ID( `gDW  Į2HRv(@Hɳp РYDKutZGw4?33c,- -^On޼ǯ^^}''-LLVo|7n,N-Z[xk<񅹹'9Q{1A'>ݜ|@__y܇ΥW?s%|~Ԁ7W/6O0gzzl~nUēgV89xBq+i NMUDl(x䇪X1A^ 2 Ibx@8:%%%$f%Fǎ qà]pOH>G`@3 !'IG vWVa@s$,q5=DQX/dZb!'w=ژČ"L$L$h$LFK5\{LbgbB)KxJչ%N2D_S'm ezOD'-3S2MHB؝#Dx`pQRkvk2/NuSeCm6zC+a@CQQb?>Ԝk2~<tG!)m<5g|QfPqx!P8ŢX"i;mq8uK":THD @7춐ӇS;@Ң5D,ZkX'>N*>2naf_tZ[[N3a9qNl#ÕɓG{{1kzڵ~cssn0}wc3/?~g.cGw|JԅCnꕯ֙Foa~n3O:xgc*}üOdYY_/kvu|Dffh{e}}ʵ{N' p!81vX~O,-οGa@]mv7?5H݊pK7Wz}&weqO ``nׇ52M݆t@3.JTH'φ4"MHyI`Q L8HȠ ī$u$Pבb]#){ !Ò.|!Q(iT35лB Q I 1zKGF7PY#uxGD%e!')C"Ψbr$O6=_03^ y ]^<@+%TVѻD˺ ' AɶDʃѕ2y!$J΃>`iVc: OIGee*D{ G,af4@/o)?$X̀W (),_s#e!dQ!ti2y"6ܴ%R˅HE##$!NImjEAD5DMBK!sC5Q=ԚXDYu'>1 ]H.NEn1#v8F#Rƒ1^N3D7Gݍ9e_Ko-g}c}euZj-,|K ckVܰFc'@N.\~ƫ}da8ȵ7\˫+7=zKr.^:>pq=ZxۛD;sMآ4񘥅ϟ|Kd'yIIF:O=>:wnk4>yeJ=g͵>{h}870`<I8')-G@͈JO%fq)E.Q9@܂hxQ!Nb8Bb-D;E4lrf}_1E 6 %UהPXy+YjcP5uVQdOĄSO"C?O,9cuΥ@&$gj [KF2K_l E:C2;),A%c_IO@rlX-UKQ)*܅ɥl"q2 93O9)iȆR??AC M$OU x:ⷁuYz`KS|$%֪a(IW9KQ_Ct2WR1m. @=Y8'T|JhhW&֚֚XkbkX'imS6FnW77X@gSb͵kן|4xM~n0J¢ј}ss:g]П[Kk\ȗ:OZ urH~«⥫?u{ ~X?{pʕkyx"WmELJn,ԩSfb¢Ino]!\Nxٳ~{yfevhq5_z ђ~ fn,&{c}Sho~0Cʹ96huUNjq3tgμ)4L&wg;7Wחׇl3Cݎ>S}/g:87Jӥܠ7P\Sg5ΰ謉4̌&Q9+`bu ;8;G2 Yi>nepR}fs}s]5ȢgS.I }c/\@}]/QYߺ>4WK@OZL= p\ aOs)aYYo O. "!x\Q=Ix&4Hf܅x.Mj$c+>ňP,q ^D`p 3%q00_$d RoXٺLyJ=j0䒅‰Wh !!U^% )0̈d\,p$+ȥ7΢mFH$GE9z%bK!d+&}&%V\ i-ިJ>+_sX:q! _MI1˽9 #YiIAyJx"8J&DFbqY N}KyySˏ& Q|TʑyͳB.-3vM"SkbiVN*\DOc5D vAsSD]AkH^GT ?NAWiB^)p`ozh4)N ng P6cc~+9q}m1cٳݹ~\4L9>?TAF^~~oppxs}3 6ZZZ_Y^!=Wg!yl7&\t pdC"h]38~G΍7z 0 @f[ĺ 0qq8;K숏-s ׏=a 6k+/_J9Jtqqp"D^̛7\k>a\x+7@NU=в`~'㋗/==b.^rn]oD,7SKN牥Lq< (/ ?P3e.6W;wEheQ 3=zl1ڵ啙OiFf~fPxy"R`O<,}^448IJ \L'1`X`';FhТVo&|P2,Vp3p]ΠKTD-cZKQͥnq;Dn_Zy$"" Qk`"_`v8>nJy1RD-b< T%dIiw(%A-n8'ÓxT  aILd~,K[">Bӷ C/\FeP8.l`W#Ud3hf!aJV0 I-&l,¶\L M[aџUtug M8[7;%GAK=a$Np[k"5ZkXD'>utzWN0i#i1şka*@N`hK\TgacgfckKv牢 gp<o7m0As<75Gv@EI|CNqw\g?/.*?뇌 'ևZf1pFnpt _#yЊ3  XќV9( qD{d'iPd dFlss YG2xxtof̢NŤ:Mƒ}X[l"qJ4nbdZSŒbr&֚Xkb6OjX::::FV >ΰ}H\ӌ66Hl t`  zfʿ+ؖ ?{eb<3?c3H3LMa \ >T`~39AF 1buQ6g L8Tԏr! (@UfUpÅ&p~sGDͶ9&!`5l%pVĴk? 1@\3$?̬33}, OD,!^$xH@ISIxS Tfch&7܁䎒u''C{&iدRj_( JNjS5&0GH 8xjSp &+։9v0 ވ˧,rX>&;e`2kL*bs7!6u5&H 8ސZvΎAOu!-ͭN7Y"nXLYuQ^Pں)@(5#- L\ )? C7NPψgW&4Q٬C5%AaG蜮=j"@ !5b!l@Ғ?;YO#cׂ75ɸ= c)TvPlsA!*_.=Bpim4l$Q'9'Pmn؂k ّfxZ13$wfɢ(O}qO[fE4IGP3EՌeih!CƳ3=s!ߔ:ABA44^HlJ3EQ4HIn6McߨbħTH$NR%fkC2oYx J˼Ҵu=;]$`f)jM5ZkX'>N˯۵OkLi]M:tQc@`z`nu&L&3#F]P1&~b+/dOg|RAWn? 
%:.(@80fXL@bQ5b6%%RTg)Ʋ 0S<hzw eBG-yJsaP&~YK]xD BT>Ot2Fmw촇?,˟#&j8t=vk{`M_ Gxf.1uz󵨄>&ec"AA_,b ' lx1Jtk3 %rP\,y@NVȏR ֜&D; #`t@v\{fni>jfC-[r9ᭈk%zeNiY2jĆbь4XR@j"`e)t XujuВN r UK[&["gl a$#:w4&D%NVk.kJF+ i\M`FfD%H~VK ѢS)Amڂ\LU0mWET) TDLJB"(o4.Eg-fI,RFZ""IV *]xu&Ne :}պ$`5_.$gL `GCAʴE/<&Argd\˘*HSf_j{dM]|] A!в a3,t_MK[%&&H`&FzK+*`p5Q#5T'>ѾR4ddO:;2/a5l<>@IDATfjx`V8pX\Hl-3e gNI2!K2khېcjQ)#wz j8q=dWh 9xIntMlDŽ}0y.ʉ]X DzJ#갦9@jH* .D &sr oI* 9Ep~irݦP}!NK2DJefvFNA,Tf Lih aBޚa8)L^P+"J2gxDA, Eņ,B%)ukleI8 kŽ(= Hg0wJL̄]x $ ̰#VdiPN͊j'.frC,a[Y_STp.TVEeX̒-TP(!X,^da3+B2ȼ@u#H'i h/Tx)]y'v^Tc}ޚiLAjUe Xl5NbRwuz %lz1' ͒4|bRrE3܋>L(Tœdxi@; ś1x`>Qiy4Xa~&]O\"aU\A#ŷӪxgO^T0b @i<m47Pҧn/星=ޠM+`1H(1t33V G%U܃`ҪrjJ]kb]&>>OAS0.G'O̘AJ2FfPUG B?x #>WB!.qL15JE0Id 1f'M)&iŒ It8ԙ6IY''Ha ~);Aj axo[ 2%SR;`#s/nۚ380q+d4erCSZtZGSBF hћSk#排q8r?W!鸀{jx"a2EQ'd +yvgmJ >LVz<197G=ƙp9BGUT$Aw^Hb].? G2*Vg (gB0J _ i'M3%M )'l J`4_c,?,<G*0N!8OĖ41 ZС}6 CSJi,Twc2@)`T?GD"6'B c-ZOVt R$aERl b3DXɅX$ ŠEr9b6a! #LҔPBrCHHA>X5O$t5gA a2+"`UgCk<#Aފ HYZ%fL}N23 GTe$N%Ɠ*0*F6mf|F<[ZXD5lJ@^*ë́"[ k&_@^^dҤj"TK,kP*n ᘂS%ͻhJcrI 1܄̤%Gt8;qgf$WRxⵯv2e7fU i*H?C1ey\XF:<]QM4utyNJ:ac {c|F7T=d;MuxکZq>MDE)pDs,C&iQ6L3"w蔐Ȏ֝@/ #2wb* @̮VĉiRίPP-g^ s.sbљqjJ{wɘw`#Iz\a"7Ĥl눤S@4/ﳾY%QѰCW_ţ6@;Qt-PQ op WX툦JD-6E'GDia P(^ TiZW5*9)،Wh1Jt1e;1*P%Bq[T5"@_$wZB[vh.gFw!H('tP6'`>d@EJ24C8"ҟ&ʪW,gNb*ᯎ#u>"34!*G%{fdnZ Mj2N邲kˑ~%[- BK_ҥVm06z' wgB #<&OIp@COƭM= jM|&Ʀ5b&$>&OtQGutZGwaY%, i sŎuX[>eu-}L]'1=s+*䌋>^c2aubG<8/RUl¥ 2)lw4_L;O0yL@ 뮦ZKߊaޖ|Z"-afm(l wWJ{Hi#F!s˓L4z$ڋY=o|fW3E- L/" @K 'm )io[Ml.HrrQiau& RC?K޾) |GW&+dyHh#"^(˫469PWJEXG(xK|%Cm,`g$h7G%P>f*+%) (ɏ 5%KfܨKr&R)z'^~&$Q(/')hJW$_MԚNQGJQcpvJR֚_cC#k,QCE&֚hI`(O*jXGIHSGutAѩCP:OTڨ:Ot"^tg@Ć=@/!~T25.ve?½G"9P^P4y 'BBo Xu@6w$P|/nHP!b8 u.R@lh*a' b+.H '?h#i$d.DV!_I "m&g/6a.H x8P00jH!-E-৳a ] +o^HF}f90X%Xov!I^FJ:5͍p$Kb+\7F7,L!4T@bU>rhA@=MmE,C)Iw M| u7Wykk ?I pxÀRGNƐU0Os~39-ÞXuO}"z:::::O}bk>O-Pm<@,0=Dx}ab>'6 bSbAU" %smS$H*c}op,<180Ez!T`" "![bI*:toC<7Cf @5)<&9slj?eMP+8d 5BQJ K)"$B4[y2)sHG.ˀϖizÁ 7GE1䦤UG'@ZT˔̓(Udj "QJzls-DxfgH(b)ʛBV 蒸bB7 J{@I,@ԅaSnDAy%'=b}*DH|dnt߇Wj\ZtEYCVZ1lPkM5156,-?6O}u~D-jp$PG148qŖO|8ܩS NuhAZu5:"ԯSj"=e=t bޛՉC73Lx@$'(Bq≩x- =zJVIp;ݸ*gىv(Dr<\#>qGG/t4؎Zkr&6=JdN29,BHqH} /F o1qn.05*,,§,6oAbT!Z819D*gF dV%bjJ71ø#Dry4D̒a$j8Q4fS)mEyZ.y])̈zi4|xhFl: pϕPP a)Z\yC?ĉ10840DzS9dD;& ԺTdfAEs )Q\ u[BTl3T5 %yZTvUkbX\&˶FQDFOSIH8P(N贎Nyk<+bs 6΋+0x9(wV@@2HG\7` d4C"%^į@kFIo?J$D"mŘ\~;2IsaLdyi,:!Nr3jIO6ib!Vb$M(lnب=H\)#+Gb䘕x{<2%In9 r&Qt%!]pYIlFV\͕(ё9|ZhKi"J)ak*D (h5Px2n B I|V 0§h7E U`ԎRdJr%JBHe%C$W"a\ZG5ckM,ր"51֭5VcX&֚h!زp>v\PD{{҅;"(.8CѩګSmN3M7CRGutꔇw']Y1U牙=; :=:z57l+cvU ݏQU8#snF*,M܈qB})ИF5<7Y\}t_R/.Ӆ U4$oo^=$?<īMhgن !DGQl qAlb B%N@$ik;r<&<dMIr h0OmZ垘F|3R)*#xebjŝ`/9`+kAEvWc&~%eDbQF:j[19KlZ (Ð34H*=qKV)ZkMFZSjM}b%j>@FutZGutZGtyb'ybEl~F2|t:EF 0/͙N=A&t܊3ۈW #=L{%LA*~  7%PC`;& %SݐEutZG 贎N?yb'ybEl:> `1#@ D?  -ZP@ A״ Kp_E mSPDJX'kUG8! 
Ys+g;ҔRQ@ cʷZ$C)Bb@paY4ґx:9l)̟b^&e0^jSEqR&حJ[9@,KA}E"B6Zl&c{Tk"$jU R@ZkMLժ5 A}ZUĘD9$XEFii,4::iTЕRA(?8:Og+b!6&\&@k3Vd(D7u2!b*΅K{EsW](/؄WzG D5A!dFԐ-(82Nifƣ/1fl|0QyYfzIgĂ,ԩ.ȊcfPU06#_Mű#g-k"+"M*cıqWx1@gl/G@4PЗ* 9o #WE.EZPbJ ]cpii\э97*6t()ˈʎ~TY] =oրɶ9$ ;A f&WrҮ:(PW|`ĖtڅdU8h ~QLP-PAo ;AM `Bד,8lG7@d?m % 9U`= N Xkb&֚XDc'>NiI2欣SuXu)L@+bظ@"~-?@`Ht%8" !8CAFI((W*9$z@Qq%Te)e&eGl35$k1ɘXw(a2Z6zc(b AsFZd L ŀ> ɅXh;ù?k{ [Bd /(V;C5dLAAIb}I䀂nR6]kqi7i+'\7l)JhxjPC~utZGutZGuX:O coĆy1h P a >@)pf`#]fZ+"_$%ra .D`딗$2q U1 2&WdH&IfJ!EO'edBN>H mPɐB)}e5!ۑ#7)PBoM",9a( 6F6b IK6÷D hEa1f7𖌸Rke& 3 /i2EhRleT,J!\kLn ETY1|;R]`PDRQv`<KUCf;9 Bk% (Jc΁KZ0%zBjMD騳ZkM&؈p8>1xidja >+}"QGutZGutqC:OdQ牥F#:OD&T,repg9#ev/~TT_l@#HI]$1HP X,Cp C8Hh b+%2g;#l M'䁙he_xJ]>̦g> 8QY)jb:9a'QH)>L˙";N XܪƞH!@'%Djɕ:Շp߱6.4gDwTca׽AV>xJ#$>\RJ!7D&RE^1uVT͈!-9*`җ&CPYe / *5g8Kᣳz6b+čn42ӷnuZ ^GF%͉ 1¥1Mxk(1 !utZG 贎Ni2 DZ:O0LG+bsW!69Հ s~mA ? Xb-+&⍀i N&\/dFe`C?w{Q MFC2% H0ĕI r7rme9)()bxe"IZL԰"PH QE_6(^I-SUdBTe8{c]ͣN'*i1zWvZsml H@xBB;gT8T*J$Nd:+Q8n l?kιx?׽{~k{cs564+UƝ靝FYbMN[ubv͚6٩ElsqlU+5"$轠{D)ϪX1CGj  4E;a)Ca444M jRd`V۬+#@H.MM4ǁCnB5_BÕŷOvKmuR3v>_>RR}~̳15:mPϷ7@}Xo.dP#fd-O@='"%lY'OW‹R/U+.Иq.fNwΏASf%>RIEosnK^:Ios9\\ #[Ս?R7),4-SHIDDPK2Xk9-o={y*J^f qQ' rk5`RăQ#kWDF6jUJ7:,LroKzޖx[>쎉wLlc❝fwvzgM:ъ:⺅l̋X& 6z Xy Txm0|unCHUQ)N@,Ĩ+ġ1S5\ a33.A#W@(KhJ79uY"2 mK5).HД.lQ.N2Kk]@_<J1%kB7$2J<yBǹŴ93,H>v71\J#'u?(gf4d RPh1r~O.QB$Y 9sDEGMM: * ~&njB%hT:%}Y`,X#3"*u(B|)$~)̹lIG/O c5C2/hͧiq;B"zrkE%2rZ% u[m%Ehu4;&Q1񎉍g֠qgOT2wv靝6lIwccwx׉ҋ;=痏={d8c^Qx:Q3qk \W{Yh(-=dLDQe'uiz IOhj3C[d[tSHIW}Tݔ&1+RFi'#g ㅋ)[TIVwu3u W!BR*S]m%ޖx[x;&=hx;&1wLc1hKj z- =/F "!"k)Nڮ~ۣe qîWU.cBˁfo\,CB$ Cfg@KNG Óe fA`J;XFYn%1W r= DžfǭleL0"E!tajCs^nlg΄"B#0t7+Ҩ<"@k*D!@&ա1(y6Hd =֌̴{4&ȁODVKxY 3'}X"l# BG&O* 2gʽ:5vpi!=Ww$t9f -ٸ}Sz筢k"-nK%ޖ[k(1_=Ɲ68}ĦHM\ܺN3=;u׉:ŻNllEyKV\gj-W@L LMB40T"DN*}Я(m0Rj-`X;dEQmtTӸwwW}$:7ocu)<93Rf0*L3q]gg]yKQpu^ @]1P4.}B4[p'&Y",9/Wiud,'v<~%sS,g1AKhy*+ BXM)A"YfEIVBh]\)+=UmD.WoK-wL1q33%NkԋMT_qgL<{SS9ręv?~,#^'EgЄ]'u?햸%h57%>0S,2]AP0(X >zV{=[4w`?KqM΀x:Wu-!"ιE`,b`Lo6+3'DV@v `xzt~_%Du Gk霒Njs/H%]2I 8%t tALp­ W {-{o7}y}bϭ.>;o0$nzkdQӀxc%lt7b8 Bg\s{5\6ތTFӇvc 1מH 2܎%s2T&6U'xfR0- 8n L0Gy);gy尧FtaJxS w&o/5 %34}LEJdi;"cs#Kd%->:3FQ KDչS%u[m%1wL;&靝~׉-[=`Ce:C%Nlt׉wÌxf4)')%iHIʚˀPpR q>Jׄ7.2bhEr`DpbfDKdb(a:8!1Mr[oIR u(vR/ْ*+8|XE"2C~;#8'OMyY|w.1goYS`Y()ƒJ.c^< +V|A{_@D_$C6`U=bŅdmX:+k*'IJ< 8;񾗒J1N6)ϔ^BbOJqHfx2ޖ8VpY:ܥp[bhJE-;&1qwvi/ӂ靝٩9^4i8NsN[,}wLo5>{YRW)3:Jw׉f.d-:Y0 hVv>XѶ_%Sk+f&7J7pXjGC76/x w'K9OuIqT3vMJ}כdxq.KG9&VCyn~UG]NSrÌnOm8]27JZ) 3To݌ h6z/vraKDv?34?$fG'!it+4zlLՁmIacɢA>pUr\Fgć7{! 
y$$j<Q5BzwwllU4S垗(h&gj >r (pc719[ r+/^*-jm%1Q~;&靝1N)DßYw~ؒݻ?gv=[d}Lk}̾_w4ĻNJ&Tb(L`T5E;:< e8 Yz(ԡ>&" (xA-Nav% cuNb,8:rKQ/^q’r).XE\CY&Wz0d]o9҆5}GP HBc=]&ǼX$-SURJqT^ "2Le賐sNmF4jjz3L)KJt3i9A 2IqbNR+.,YĽS8M]Arp-ww \#\dwAvqJ0:VU,9\M2d q8Ĥ?Ҡ 2AiGXx8MBnK-Q-汦{[ba]EYhZ]%o5&1wvzgu73i?:U?נۦ)~w>>=wR`w笼ԽnKi&{V4zZC*tgg_n`y*٣k>6#.d%GDnsߑ}͚_ x繧4$|+'8Ujdr0l+䄼$l<2-ߟ!091<,o+Fy-CWѴDc]>#Y$k -S!XJvPq+)"i1I 1sJ$s˻O&>"ڹ痮jN<(#ar%NǿGGS`|H\~ӎnfj'G8>gdv.ufP߳ǶS7nn?!ܜgY*'kކJz;SH'=|GyO0US,%y{Ang)Y+:nlQ ΜZ57]9խ_I/I`la8-+Uѩ>&ļ-W%ޖx[㎉wLc❝31wL>}wkeѪ~z4z{z]::U7b aR=L@&xXѬÌp(>0!CjV,h8QM!T*Q:%DW> &0Qܜ I(qٖa]˸it'fG޿ (՝scS̗1Yj8(OqGruZRț% ޖx[b=m^e.IgSo_r ^?,;&f;&Bu掉f1_fs5wv ġ4EirgDU쓑:Q=06~_0+?>:K?NĻNv~H{\~BP^_!řd.SXa5 C8Q5xi/7'iPA|l˫-NWۉ jx85,N]e\zWrW0֦bq7{ج,ǩ5p nm0>90bOks&ɖX(v{ya6xgd;6,U#Q41B<چ {pK'37Hra%/Lm5_bS*A\}2~!2oakTDPЌ2s{4{{BH^*| W x{Ǘ'd>hϤ6iKO1e=T%lɎzm܇YWz֮`o/4V I+oK-uwLcb&wvzgwvzg[@׉&i_?o5Z}9?sw ݲ"5cKVwhIu׉wCx@@p&Wm6~ ~ʘWHAit!{J0H0T?Љ(=Qj0ǀ3Y:BdB|1 CvS"?!=HW'h8u#i0Q\i/aIGt)IE K#ejʨx*rJ\ya"|gNg"wT8Ƃؽ(_a.7&elҋL-yFbʎ)+`\6 Ȋ"=l P4q# [e$Fy_3]2jQkqF5y|QXQՄLUM?WpC'H{*; 93R{мס^ r>mU,ZR *xʒNag|[W/mxӕKc❝6im媉ԝ6ÝnPOiy,evrzb?ɿoj˛8E4>5f݃lWAH*5j!PォKZߛm㳷f8GU1-1J%ޖx[b z;&1NN>&o#_naO?~ncg˕O[Mu]'u" w0"6ρKm`E-prdaw] JpHDc!)E/}dpaQ0-`I$ gKd#J|tzvJr2`RH+}^&4 F0`4 1_ML i:DLx]Zy|t>_uV()f* 谉@OY= M1:$ASbєx̤qp!1 ;Y9q'SEv?GxI'(ġI3{>c*3TF:#KfoK\߸-"wLT'kuwLcbmj1ܞeud3W ė)kris-6c;;+]աK#l[=Oܟt/?//NFʐ'yhp׉ՎN*]'Z[g/<rVK@PpF=D8 ˜P(OZj0Q+L6!o-ۇCx$ =K|g%#F77jHfۯz1;$y*yai^yCٕK0s-QNpٛL'T}cϩW;)#Pf+6ܥ(cbޔ)H4%%NQ4]rp^XZ(M&#\x$Kd3Z'.ǀ ^~4+آR͛>>+йgRNez)QER`9E4y4r-Qx)TU}EFg'/lW:W._ ton}͊DKul$PEѐξwuJ`>tԐM9cP.Eff$nyJܖ8ܖvݖx[:]鎉ܭxMN9蝝։,oǧ_,zƵl'NrJ:Dl?(!EDГf1W#Ʋ2 jHH^O)tb7b!+nVN!_Gʴ @I `1 QKRF7Akԁ$G[xIDBS2:aM"(\n,9}ףд_ Sq5pvAHNZD$S^Me7}):٪< IJ;V2N #1Bn#)N>3l0\ƶDYɘK9<4Ju (b{)okѤ:*$v&I"}óG%2MaL+eXjXZ+jm%Zm'ulaS9vcO}xC5SNT'_'n> ?~싽j4VZkp[qHZ^:{#+~Mn3ĂSP3&.L\vV=ސr+ g4jT- {u==i''NHQҰ=B&P0GXaՇTgHWAPxY%; @N!Dua0η' w}~q^LSڄ: [ӧY8}7o饥aLU HaLd"8_7m_:$CĘSVTQvcfmA1&jR.}U D8 @Q}|B{&6vxPՠXW2,\1Ca ! MD>TU%EZx#W?zFOdli+@yR#,;1ZRy.[dD7g z,H VSQ+8ݔ@?T·%ޖX-;&1wL;;o:&&/w„~W>xճW9xik)NĻNDl& 5OQN3L !T$"qRB8F*maIADO&CnWԗ3Wq" 60ANtSMH!?|Xo~_>~zQ͌1Yܖx׉5wؘݚhoBa) ہ3,ऎl4Os~{뷏Pcu-urfyz舵sϸ]N#vk^[hE-o)sfTߞ]oФ*{iiIf+3Lk^u]<Ж2㼥Ttzt҇c dW8AOsy+u襟mSUwph5,؛C")lN7Jߩ~cX@p i%aV-ҟx3 3m-YSMx+vASYd2jr܎+>,CXWdImؒ :N!O%߇9 zU}Аϝ(r&jRW!&WG*JL!:Ge8d[RH%t5T .YmJFi(WȪmB2~W)y+gOƼ- %`x;&靝٩IIc➓y~?~G,۲]'u]'P#6a75,C@`2'&Ŀ0:/IΕxByCu!<Eczs/TU 9%MD'eI'ɤe+mEF2T.(GLVG—\fDQ"Sm\Ma&U&KY'1 @y@8$)qH*,q5(#]9HY&#\Of>R/BM0Pt#nWe]$n3C#QF 7WP#Lt\E&iZSg1&Twt8[Ka5^D ѥH]s[mm$kC55Q:&wL0wLj1qCVd;; wvzgoٔNfwňFe.['qDûKw~tۧ7tcBRwS:#2-$%r9Gub:ըqAlx|+ϞeOZ{Gwu߰K>N$ta5'{zu:k' #p?M( gWu9_ S fcuksɥX&HuN۱~`Hu1sh%7k:l-b^_6dA_iLΧo~ƸKֲ=؏2QLd=Tz7&;{Krɘyt0Ӻ@9Qa!Rpyc&u*ƈn%2ZDիVp[m$wL;;;;F_߽X,vo?`<c-':V Fwx׉[刍M-{f  7 ͢_Ѐ4Vu.zZ+Ԕg2!wY4$PVdH,Q<8D#Ǡq~yNON氼"GJs(J6(Q4 S!%zo8?Ov2yE=Z. 
L$,"R9#\YX"E XΌI${g\l%lJ 3-2g% J uS #KFq/^9H8%E(b/p+  (lWw;:!J: t\AElrLb\P|S̼(G0-3L{f*,oRX⮲aJݖ*Y(ݖx[m.B1hfrg&ՄfgԴGHj38lLE2lxgvgU;;.D%|?_aڇk$ČΩ_tMw"r׉CnEv-ҐhC{А fݧaK*C2,U@@JVYw0="( X-zkADyM= @n).P'f~9hRw&K*}.wH=I`8c!'dLv^]lѥ`5dh5$I"YYREez&Yއq 9cmS L1 8f!:T]Yd !i=}?ͨ[d!VҩlU ߱&z;IgHŤ:7x'*~K̏mG"]qg-"ј#"`ؕ>:s!V¦)_:_糫ޑ.!N"΄KA(3Y| /=YԧcDciR TʅpzDqWHfRqzI鷳e%M*hݖ82mRՏoKԷ1Qz;&96G076k3znӦJNNzYvM7['Y'z R,[W~ꎾ?Gk)"_~׉[-LHuf'[n9 !҂ղ#ZO`mv[m%1!掉  !wL4j٩)Ɲ6ݡa靝`׉-OZO<*~K§|]Z~uwx׉wC zı@v8}wҺӁ#rmX210# X0P(#$D#Z`lDy# oQbi qDCƢ.AԈ q- E\SLuF&X&A#8R=k[9[ΣK|=rF~ɃGm1 s*MTKD!Cɒ}ƕ⨋CdC q=I4|-}qp#GzvҡDFУkIG9_9BvZ#BkwU3[fpbfAWcYQή]v<.gun0I}VVk4o=T⻮JDfP;Ȫ S9Dv-wLaiYW011μ([e=G}靝nDila RjԹӆ;;ZU;?3?~vo6j7347ſ:Ѵӹz޻NlY -Dl5<UR?|lZ?t{zp`K9Iq@&9͑Lp61 O7jfA_@o 矰b殟pQ$~D ]EA1Oy%kKu%LjinF4!)晴4#{(orm+:<t91PתID^֋ F m[ix8'<ˇL@Ze,@L<4Jp!tuo14*!NG sb]fiDT6T+*="/`^]B}Zkyա@[Q *i:\%B'c˅J2ҬOƭʣBDZ1ąv EJ&dfLΜT9Ϸr6UWॗ.eS adޱKbZ8~KlMCNmJ Ɇ.]&KDs T\ T,D-3caᎉ wvzgwvzg&Y'Z'z(/?O׷,^?(ZBNl ׉wÆ<#O^Nl1Kx)x^ȡEZarG!EGTt$ ѶݾFU@ߜ`:_wXro8x$JhmFryDHΚkd +򦇦$54i2 rιq"KU T K6;{L"P ta]ê@o6ѿyT!#m}'(gjO6xI.eNQNWw15' , ٦haMWc -%ȑ Om2#sLH#Z[s:5G81Smplx'u0f-@GTe #=">VG2m>WҒtŧ, @lݑ_05zKģ*Gf6uoRuU&z(%-lV|iY\総C5m(g0&m%ޖx[b];ocU;;?٩y[$靝׉MOV~OyOw6]'nUٚ:ykT ؋@pv EBAeOeyƉN~bv}G#wH cZwSH#4|BЎs+\}˝1|}Q`ic"+LF󋆇1=K9򰔩PenH~H_\?-9BKzdE"pT}~Mb@92K\:jv+3duzIW 4LqI`XyOzGgWY<7(MG,n)U`rO»&d/-߽L>z VU P K<ƽBP<,V`$ ;W(+&)U]5Rjr<ݖx[muu-wL[ɗ@>wy0fg*(r~_UȯlnP!ߣef% L^.>\g }sAurI-rS,38Ձ.V(7=ٔ\U$_*-JYm"1X`-wؕXG i-%-8ef,ܴi'%ޖx[#uwLchygfwvzgwvM['6Jqx[_}p է?ňrDiuu׉wx׉rf@0BRR_|9 5(M@6Z9`tDP tLΐ i7b,~"Ͷ\0-5I!*DDbB '0RJ~;My0Jlp邀#$ /,hY1IS\nE TI"j sqڒ(6*F Q:t@ф8:bqw1u&ߌ4xB}'+1˕~Jd4'TjRZtߕa\a%@m8r6IR!u#s<Yq b!+U dj2d^":C9PIUO+J]wEdQ?zmJ^Wd$7%ΞOa"5fA%z[bvXSts[xD]b󺉺p}ĝ5鸳MWqV#wv^'kWO[_?wٟ5y$lҲ:Ʒ)>!^BMu?>y;䇷4 (}0ܼڭWo攕ːAM,iLDltƱ/~^; ]U,Wke j^er '2 6A7Gq)raZ:K)tQ{Y?Qܥ;q;+(b~p/+AN(+>۬(|%0/&I=#Seo=" J3b'@r }:S)(g7r}ڛAIU KG;큧w۳d8iۉzHd4)+W޶p:#D-Y-86.?ؗ-?ʔ-/ >WȲʫvy@(3ؗ/D "g$tEсa>jAS)e=s[˜nK-;&u wL\x;;5 fwvzgߜu?տRq{}ҋ%eNv׉w|x|<^_1HeR0#H-y3B"&=.کl]lAfVsB=oNQVFHre)Ȕ}=9"< I :s9 Bl*\:jQ<d2ȻEnJ4KR.}(̑2--JZA?APq hbXAcL "3S쨬RD+8 se  B8]J(Mb"VE;.feR"UD`[5>EW%Qy^Py'tlGQBڢJl)Zȷ%2m%ޖXGv;&xg MRN7nx:j8ǜ'_?vvj 륻Nij/`*,u #g̛3s h 1,"l 1r.qHt~\OR_P D5@vbewNbkBsX'=@ Sk5˨ K?. {&E."r6V`J:}T(tA3Xp<;:plH ^pĚ0QۂSUM#u[mҷ%ޖt2wL=gL|@&wLcqgwvzg-L1b4hlONDǒo/Dw>?=  -fT0cNZ÷ y~]IC ϧS垤{E%@ez-C:mYc 畗Lvz,DŽY"hᵝ@LS7+߶ 49_wrb+!uywx :02=>0$GYʈF)C p#jY{p9rz>Jssv)j#xHAr nBxTj şt嚖pSZSˤS&D462>!XDӗ I>pjR.l}&@( 3P%T\U V9N,$)oE`?n Bɹ<8UW;-5ls"8$r+ԴxQM>cJTx3Q2-$*\0f_. m%jW*m%1;&6gS;;= 靝6ol零Nl}{__x6٣ZԹDw!1÷ya/ ( uz }00ȁ"ԎA u$r$ ;M!&bBb9"U}OAʤr@0ܥh8ѬD}SCW╈~XOԴYT5>ዔ *U<R]wL'r ]~$q5=-bAA"=lMQ} W 0%&/$wL]=Vb$Y"6;cT\)=lq^wd#\B<*tp?y"3*2*uczY,B#jqjUOTe,`^u7k .ʃDX qVYD:0B!'nATG֙d,oL#m%>W1ԋoK]W7PZ?\i1M/21QwҴaS&:HӲ;;Ji+Ԃ_ >_+??Uu?ktÙl}GJN|Z2ݖx׉pVj* Gl_hesAg|xCI,(H8m]e$$p5\RCk&ޔHzF-`V(c|J=͖Xs2kDӇ3ic]Z X%KܞrgY{Xg'et:]j Ӝ{S m#R# p {9C2v%]NVh&6ֹ,΍#*/"0lu!<, x![zCeHӆi 3irC0Q} Zt^/=ʛ',|Cç3']Bs !I(i>WGw3NT$0cyxPx5jX(T#oog.9ŵj+CZ"HO5=6v _ RT4Ux5[VR+VUnK-M ՍoKT wLcC`pgwvzgߨuHf{c+o`O>vű^ĻNo5bכD-u;3 !8g8 Hh` % a 7k%)dH4L!km 28-7QG$)HXV6c8{pAVaa=0d%>¤> 4M $i嘔ϛ8܆VeOI#\5Jf+*pd)hu1DTUe|mZ;WY q‡JF;҇!eIE%XJIka"Vf7G{k㴕8F%(x.EG#jb+ \w=L1_#:;\V6ʺA73HߖXYܖX$nK-u?uuwL<<掉wvzSU;;],N:Q-S}ȯ-O'g";@~;Z_N<6Ħ7[7+A[퀈o@o9Q䯕wC,mfRo3 ]˳珑=5H+#uUJY ۬ƺf=a}I[S3o6p\8 7Ѐ1^z/_ތ^pԮNKܺĻN8D[GTsHU}ի}ĽWў~GnF~h+r+3QLUU-o5h#WPANcxM6B,w v>r m?KsD2&=7->qd1.I!< c|Ĥ&NDy{&Ul}Tn?oOe:#fӫ$'Rw>(?Fą$pucg~EYtr?yOibWɿBK\i J/%ByShV&~bW:4Ny/ xjOlYB"@YG8d9MБkTL|B8Qt O6GHdx kJnI3SQfۇ*W}eqJd ʻ-5Uؕm% %֡1Ɯ;&i;;5:靝6Q {7沿O/?nՀ wx׉wdی8}s-(j뭛&:ND[7hL[c[}Blf!~:&/^F <K\ րYm;hzSQ݇O^"? 
n"O~V=sj^6a&<ëDssW^P7w:'iL޽jOr+(|h9ckvi֘'~RCBJ` N\RnX=ҽ#:qK[ieΓ qX]sy]%cbN/ȳ'0$|) ;IoFlҫgs}Yϴ$0 /ٱP.P'$YIK.ۈkZ˂w< ‰]54hߙn}~KkfM:CTͿj=+dIyj.ThCYQoKT}nK-QG[cxgwvz;&~DSqu@ ?S޴`o2,pgp׉l6w:uy8 -bABC+ L Qog%"%0F`G#k(M%|L;BD۲>tsE00%*/ dESɥ/KR}NhNt @.:4N\QPB/@y`A6:Ԅ}ͦ:, Ф#ʢ'$qN%M)Dw,鷰X2T3 T)萹RQ5kdhD={lUI 2.{<}/YGvG"WH]hQP3e#>HSa_c dьO |R{(IJ)ZrWȽo n:Ө,1foK-x wLc❝nSs;;=;;m}։Yy_??KrxSҙr]'ub[i7;OqE. TKRm[ Yq{#ڄJLsB#g$KIaj^9jy `&ԛ??̨{j//Ii 7&Lr8#97QwBpy}r3]mD׶g&QȁV̅&e1'&G3+/BJ]M3o c y`b ȔX> ~&2#Ȕ( % 'S]IF}H6Dy%K2xކInk6<=!Ոl#rQ+&\r~S}+VQX~ S^.!uWJSrhyGR[_:U+,X\eAIj`ȬriLQ>*Gγ1׍1o&cё.`DoKTnK\-Qxu:oHH?.믍%wLc`:qgwvܝizK-.$hi}gy̗ŽԦKVɳnJ:u-ז"6O6i2@u\{j d [ )e8ͼ# S0(v¡gTW͡#4sU]SuS8wOަ*fHq[|54MO +eyP>ns'$SNUL7=n& He퍱= H)+hF#6%iQc1w{vs2p%vl9筢eVGV 4GHƂ19"8_i$͘e\2C%18D@L<JQdowj*_yE$z{lA?1)>Y_eǔpP"]6FSa+93$GBv[" nK\;-Nj5 Zոcr wvzgwvy;;5][Mſ_׿W^?ȩu$2ĻNl8v׉-+f32#ds҄dKQpr.4!E,:KDFqrؔ!"TFk KH2qcSC֋w*",4$$%I܃ד'\QbQ&G¢GF`H.;Dz”]KU*k + mVF{{y}{BZT:qzi]'nHkDu:_ Ѽ=>(]`k3l^e:73ӧ:Uf_ 4RwQ1{hQ(ozY3TRB9_y$uooI P{փLs00:EG JPM̞I&hV$ގij̧FVx:I_p$}\.JfAQHh~i/S=v2-|,a~-`ʛuvv8N2J Uv7~]ͧMN@%."4݊M8mw> DX֒[ZۍҎvdQ$ 0z#u"Bހ98{-agA{C=1}7]B IR5VyGMޚ'H@S'Ӽe4~MnRRnkTڥڛD$ \lwTV</|"[bL$zo־u9M>e-(ԭ%`DB{+ot#f9JѳD(7'媊 )|8m`2ۘBR.{َ٩TZEg0#7Sj ߜ٩QVx9[TM_6Og<;FrՃ, TVcl S;;I~Dw߫rW>~|dZ٣Jwx׉Bf?>: f99f%b#_QO"c钩W˿&KQk=z4 M=!wmL眣1 8Iӂj r{ے^g``aQ2J~JTD$' 0OD %C"dAD d&ύOjOaէϩ]aժUVZ=_4"'xJ* qT!AT!fѧHTU`ԍp"d) *ZDB$`/^>T>=KIkzߩOD RkVA`Wp!v%#TLY>D- aֱQJD8'IJjӌ[>1"j[D[ I*?F Ua@mp 腪D2 LP64G|D\.m5O挎NB<–g^r7uBm{b! H[M$r-IcajCaLtUŬC%7vD-7dj04Q`Sp4-g_( vȌ=1`$ 9#,"_#R%"i8~cbԱ5nbIf8!@ k,cB&HQu?;,ik=/@ =NXlVeHW'BvVV<\JS&M¹# HbEq"2\xݠbK+BWG^^^ߐZA.LET4*&&ьE}7!DP;Ac; $ih B2B?Gmŗ)Xץv T:lm+SP. 8KL*F |a "֖tK4PuB4 ĵu(^}EᏰjSh$P^=†R(ս^ܛߩlB4f$SHjt;jV۞Ķ'=z6&*6ގQã=tE]jwAX-,UR]c";5uh_^Y_ۉIⰺ,.w+M _.՘HytH2918efss  t hJDщNg|fcq,d{[ 19>A;YBfF@&h&ŕ%.wlxw}4')dlhg3MGlK++;;M! sq;k;-rW\ôS2~rx=$)ksKU*XGhG^Fpb3:|0>Eh]9E?i)pIqZ%Na̍ZRujiu`jua~q~~\X{BwtkX;^M &&FG:4ڔuN>P 묮F0zwqh!vJ`+Oi"Nk8̞Xp:gxκ`aK:2Py 毝'1{1QI#[ b_n΀{phE NC ^5'bhCyXb *ŷ Ec18@b1GKEc,EӐfKiħD2jV`AHdJ)| DRU2rԉ}13ɤ@%,=M4A23"VHm]J˧ QrLv$H 7K4R6,0q~F꥾Os t믧 Ttԑ8@\%JZr%46Qe5mOl{"p]H1&Ĉӱ鉱N'ukbCs+l Ī و9Eo"̘ W<A u` Goy:d&?@iA%S, UTHUڱ2/Ffd6z*C&j^ǡW+>Vh\)L4[|d$ZB( =, !˺X>iR곑%J)y 4Q(Ȱh+XGĩIR! LV8".b@Eyb.G.e x^[It$S W!h6'X6@RwFZ4y ]x˷k"x}1 Ү{sEaihegypT dCm(k%!XS?f{G"RS0D8/`/ESyiȴ`W!Cm1*z{3W%H9sTSfh{"DmOT2 Eѡ3<18Cl0DȶL AݭT Ю3,?8_ |ĞH!fbp[3K ssCkkR zDǭቱc*X=o( <|BL }H&v6s###XXb Ct8NCɵmxTgǮs?p+_0]_dXLM_Jy|[љZ\Pvdfbby{ErnG_sU"DkM;uP56ޡjzg$EUg(1ʰ5 Uӝc3gY7;WuX ѿ7vjQy# -_`wWR.-t:HI9f<Jv}oa}k)> @q K~ĢC +@7$`Lbƒ@I!W];b`cQ(dC|7ͧ2Q)SpNU,L?ekG%_*+Ŕb8PMDTn:4IHuK5' !bZZ2Ϸ(Dl[(8$8CaK,},Rc2Q()Y ]AJ4R EԡIZ1D]Jg6qdB}h6K@ݫs¿#{Ml#R<֊l pA'ХmO_EڞxyoQaLڵ}7-]|,W_-.TYM DT؉FGu%2pHϵ1&R k@ڏgfgmw;X?vXVݥjh-[{lc_ j3%bsiiadhc mȊ*DXC}m@m r1oH2鮿^2ѱNp'^*cFm.gv5.-T#(poKHweZ\ ffdBol̝)!LLJa㘏mٻ&6؆={C V\\ZP Bg*]5`=.ڶ vZTh^viZIյ"ɼgFnr"yFk+PƮn#)^Qqn |]U(G券oKEkn`I,G_y a%y%mU™N 瀍ҒR n6L_xˉ*˙qfbkYdG~&%[ 1K9r Z5ߩs-, ۸oAP!-I.rKD0 f!$/,Y8"[FIo:j :|a @!g`ƔL±06*jۥl BzE]8Qi{"m{bF'^cjbzW>S+o|(;[W}dQ@zkX2*UV0.pkjLD:0)ߪ~˂iu;ToX}U~PW/zS3pǵ8bxRo2&f .e@kJ,C[İBi*rLTaHCQ[YI_n~<>$x/y~ ?0s*% kePZqƨֹ.^;ܻ?(sv'aBTvo_W9~ew5Fq-lpJAR 5#{ynwyqa~GtNUxNy^ln.ct0K1KkL;-[F|$b[UW\R>S~?yIGwq~HqVM4V;mtȤܥz6\UScUe6i_|9{qByVFkjp t:`EhY Q1PFl DVo ;صʵ#c#E# dNed'b+ogUj∖5IӄpQGiC"7wЅ޺KKˣ_dj!SI@PiST*2`1RF ]IUU#Fڑ~jv$`kmOo{%{޸℄1WiL=/{;m;OV<n*eJb!T鮚vzJrk@ZP.!( ̴Fգx6 o f. F; :1@;Rviq;ҦKL6L;}U00B ؛Q1ʣyrLBMPe2Rګ8x4,ͣBy= 5fVQ8Cף^3"&EHrmruhzD 7Ih>8g \JR LjJx18gJf Ɣm1XBSҠ&oh,vM2!kIBX/  frFʂVDIXFK+QԀ7īP(F֦x%T̫W<)%/!yo,<d&095OW`%B)$`v4#)gG>13.Q۞hn{"\Ķ'"1i, .'ܩYrRu͟~DrN_Z\Eo@B^1;,ã"ܱuӈ'p)cw! 
V Flp+,}RMdeBF,pX{Aax(þװ;ra,[üc>k9wU76LNp(dY%Ϛ"F4GtOS`ug=YFa_*P8NJUnT vU%r93fT]vT.-tFcp\{삻K s]XwkCc,uo~Ɨ=.W%CEP3/ХU\Q+Jjj}fz/ *]EHM|w..v00P9yR~ڐQ:9Ź C "jHET 5A~3UP+2%\B њ-6Z뫛`u [L TjFЧN[ԹZ':cs[y |[Mt%y"m,B':!Oi0,ewD˘XbP %xCJ$&ځ^5R>; ddUpM1xU)8k&'T"a<߱Yy {.hjP|g:Z4žbf§DM@7ڈ05j|TT!9+DXB,bXSך"tѻ4183H"LbU! !Hb͠S2M@nYI|@Ƞ"V$TS9B)_A*jte3 %*-\ԉNMkh6BMbߪ3&GiMg)+"AQL@Nhd"$=B2ɘKQ1d5$;TS÷2"-*;-(VV*@-TR ` <=Qb=1Ķ'*!jc"W81ֳ^!^uv|W7U}zj|ᣪ}-h氰g&[زuD^5!ڝfea"ⰼ܍D5h8U;lA_b=LAHc""_ Yhy C!Tu2peҦ;Zԛ6&>ta|cW0SSzig2+4jvZ|HT2+ @7=b kK利"V<^𖿎ukk'x‰vc8,KK.[\f#(˾qD&:)nkjͱSSۧ&X0P6Auܠ<5QߧyW6ۻpSS٥%Nmkx }؝vӰN?IuqJV̐VWbrs2"4MRoP|FTٹyoy b(g!zc߂@:h(ihZ29-2'7ʫf]m-Cގ@;c$dVi3*j.yb3/3\q;+LɨrAHN/D?I[N@~_Dg"{0@/ʔ%w(##Gz ӽiCΈ :|;OdZln!Ô]-,Y?4+ w`(?4("g/Ypʒi]6|02 N( PzR´[L6; Mjj@;oAKO^b :-TI+ N,A҈lHIMBJUe OAL bf /%+ydF׼~k!uYSZs˓Ey T/* (J\ hkoB'3z]y[@IDAT*jX5og$iVJ҂S sv$7m%"ʶ<T|٤T0<&SBY.B(OikN#(;"$k4.]Mڞ(p1X۞hPDD*L5Վrp7N1@)PˏWOYX|QQlZGWϮ$r93PHØ7ӶjjllbbFR>Scޡ&ƦSn\= 0 ,xکtžۧ61%aS!J<Ƨ|O?pY^w<5AZ|66:Ϊ|])$71]/kh7i SO< O`Y:d}zbrgxyN^VKz)'35y6 TbbbsOmzZҟyzs\}jC(L8<>)CVrH Y&'N9D|337q,V2fR [r $plfhy㠷{vrI;oWQ Xwu'LJOφ<)qCXƹ-:4;?{hA&PpOU- P1V{vm*+帔rzMFv8Բ4<PɉOܳݭ$Ø2+W7043R/wTMz{}7wӴz ?4Ʀ0!MOL\z^6} jGop{Otqu3ot)'2a۪:`˫MrHY57^1;-YȂ'S{uGTu|4&jkcM_z`ivĮ!alNc8t0\^wߡ-͗,{>3ՓA z;vN xZNrM=]X Xg<Rn-~9V/w惇8ffmEk,BcqS0&w2zuOZ[07}n<2^kUs<7HZYXNٳ{#ח[]qaNYun{z:[6/m@uw(?FYvig:s+YzfuY9 L&ڽkgIe򊾻-\U@+4}]yCmz9⠛)nv>#xX 3kp4K/sjSIv:=}kXg `o@ <۳k]֥ToU/ny{v_r4eM ow siifrleZFScC]auϹ0s͈ g+ IGi޺-'8r֙|ysV_GYi=Hw&|ۨܲ:-O/wqyikvbi|iŅߩN9S|̾"VUl>Sg.s׫nKXKM z7x;AMm/e=u'߸gA2]jk0?=k)pȒ%}M_ H+g3<7P}l)_#7;4wdQVu-_vֱeDH[ep氇ƹY ;W@>.7Dr;pϪZ[4&̽x|顴 f瞲-۽)|'}_&usb9XD+?ru_[zDSW@xK)\H*<*ZlQ#I> U ƗB8–Jf+ņ"eXI.!+gyU#=NH6.B%g慦җ:q!6i&I gրR0;t )m6Vaz,;0ԕ=9x.u 3d̚W 涇MD1:$2Mn)V5ykrR$T"<5'=DsWDs#OJc G٨][A-!C,mdI10RcJ<@6pqؽs^OV} C{mi'fS\y`203v쫮ƅ{/{];;vھ}1K}oEhtja}݋?Y=J~&٧?P޻Zz3X|K0ٗ#h2O͊{Wc&-b 3~]?BX=%FGk#!pz1qqգS:,{@;|΍ɸeXO>Y[;XXjɆVj+THzGTkT<8rʨ ¯ )ȭ{~z|n}yO sr`eRVe4/#b)`ы`y Af굸z6ͨ0Ÿ4@PEжD0/"oF0'byx& BH2faI0S*s)fE n-ĜЂRbYyd0^g 5ր? 7y(hcPR 2$3K$ř &NzY.C%p@z6ň$=TO`ʣBDh$TFdIE<@j ^) ug HMJx*&(z)n 6-#FRPiڤb1R['X@!@c  Ihe73 TJc}JUCԑ{J:iVc}fjCXka0 xI 9\"2-"X_I *IL|'llzB:^Q,*@KJ4)]YԂ-nmY: ۗ<9D0̶eYOT\|! *`qj-H3?p,?M@Z@{a~҂o[yʫ3[yqhtdl`se;r䪷*yąaP_Ń|0dh\ owfLc8L~ wbtyֲmwbF"ngmUF3+/8?k,_qd#;!9W+EQP@9IIw{+DOVd˅ 6Z,s~륨2I!E0)5`hOIM] ivOh!P`M+K$I(%IFYyHz/ibNEH^7l,@TžARjazR#X$)>7[8AԞoX,J HR@!o-[Ti64)K *T Ei2*iDX-L]n؀XZ UF%yv3e -(SR0Wx 4 v]BAJ.H:bu:|Ȼc(E/GVUV?I_R`,}Xg1Jj́ PkDbtPв1wp 4 t'*YS H!+%u+Gn@ Z+ZyʫI^!@1cC\ԉ Dmפejmو |3=Uu} vT_\ =C4| >)7(ulae5;y/:GY>w0:TKj6;k dyWz*koXVm[K̋>{pg{ͮnHuVZ p}]M)*KߩؓZկT%5kl>9[\vW_t'|i[p'J0XE&_;^NŜH (|0,o`A7!4:vx*)2;idri&QS4yE1t?SS9>d?$aE!y a n&oⱇ1򒇳B(4!8')+)5Q&F0(ڧTB&ƈ} _!j% m}XmMu$+. $ݗ^}˛u=h, խO377we<(6͔g={Ɛß竹9aq:7֑j6˳1$ ?T3{vM+7M.=*L0NR=qt7Gu2ej?w/~W)`쇪>aulъ`{7rw6@8:/m?U4&G=:ׅɁz/z `_g>l7p g.E$h* qP#&BCVBfE8MJ 0X8sf 19 $3se*Qt"C8&Z c V "09T^ǧn&C@51h HEͨĒPM8O~uU @1`2!XcD(UdpSZJ *^ L4&İٵIƊ0pA>0@WX>v)DV"%x_*c./k P vR o'}/ܽ`w{4.^p^=Npw[YiG9D_}>V==l'S2(3&()\4vv7 O?^m. >-A,7ɋ6o}i}c!np;WR1  .m{'e;꯾X6_~mSU~'gzW_ǚuv7"XJz7Xșq`㷼I'J5& fwQ nZiJa@G֏Z\X`(-^K:E W~ / lp9[u&rAeF,AڮJAtV\֜:3f̴?hed I$J2]'E Soq?*q.(r 8V#bβ[S[D,`(&̺cZR͒:Vbhq9HpIa-Yq{u\r]4V F!V:HSTjf.YA@PF>>aMo7 2$H3T^x5W\| KPB# A%%%՜@18m8Ē Va"Zz HեN\ZN H# Bj}5 ?9vK\hK1k ֪,4EL$L d*ty e3'4ቬ); | 19*ĥH; _fqeN\N%}@:F166p3)7b 5i Z*aK@8_!(3)|4>ji AxyPV%)aԌģ YJfe[0|֨W+{_ + c^t}W}ӟ%r)P9jvSδt@QMIψra߭:ӯM_ GɄq籿RdbX}\仭aݎ ^xE1.dX1!,0l ? 
K<zK*5>#Y];r}t(t* YZ(!v>xÚ& iDe[zٱdL _?i\)Ü#kZ4Y$#t>u!-3сk)' Lc`FV[&: DeͯgwwOy&GTdX&(- @$dRipЂ *( &DG1lx᱙$)'O1ȁa@,7V(Wc#R5q)i4x,H@@Д#@wS,,G(D!JSHUգrHbV$_+J@,Q (4dP20 j kp֮2T&Rˀ Y@ C'-Փl0R#F-Q[VD.q.E%1 / fpE4$ē6Z%(lex3vZd|Zxi,{"5 "A fov*C͗< I&@=FLv: Y6T͗9{jA[yʫ-k:!N]efͿcǎ|7 /xC~5°:tbEG0)vpFy` ^ 䒒DLO;O,³@8owO{(WǫMc=. sk?ҡp?.9-߸1 tlk+/)ٹC;hR,-OL_hyS4!c#"{sTW>G]t`MMoR#>&4க[q3~[MGQϮvXj+0[Ҡ\z%5:ynh{q:x̺7ڐE(ijV-hVRiM/W" T akr>^~soNԹMyv>H/Vr`/N/TGgn7eh́S 3bM6evڟ/6' u'IT1%0b0ׯ'-d'Wd($X`x $ .VVdb*QN婐1zn |D* &ޒP&K"|}dY(ŠGn(poV%&R`$0lF< c!Nc4zZ.z-;XA5IA|i1-4rdMBo)#A0 bD &2@%Lr" gkLCSCmњ\)} 5-MRp,a)8ǁhi>'*Zu5>quSRE6iIWd g/Yii `V"&*E]%78(siWCf^R/6ǦFipӀuDa\gT_tc٧|w]@.F}㤜xJ`MV^A+?+ag:]I/r:|3e3܉ oqѾ@L {V/[† gG}'"u9|;="_|E]rӳnsXÎfc)T@躛uV}[\"  ֻq D۷.-\;{džGiҤkC]]0yD́s"&vyj#p5J,XYo{]01<5Þ.|k}Niydӹ=wSN9a:ƛ^=oi`s<ܠ%9b>+mϩ% D 52xMĜ%xw7W6y;o-?/ɍotia7^[Ng0R% .8> *.(n[_F>oG\0bȟ#Xu(4|W~Uk2u6m&Bvo܃{la&=WtUjW,iHv>s霒^hҢ.R5.:B8~F,{hhŅ$Sq juZ8"/tA^XBD;!%m}m<VcGnFef Y]31 J$"% Eh@KUHdEsmu%> JRDĬ(Vne^11U4QNiH";`):@eK(+Ԟi%@"j0@0 PGP$QpM 0EqA]b"#"WV=*\S*iw@^ 6׺}+!BA9/t}FQʼnZ rAU؅kULFl %Tgpd`m(O*+(H (2UG,RmYZfÊ >[#$W\.iP+"–0H+8WF^ѓLYղmw? 7'F1v:du7_u&n@5X\Țѱ!`a([ɫ*Y%K:f+]oL;A8,_glHغ{}K]vœSSݰk "/e#NGYbtx3!cŷoHrZkb|.ʔՃUW>}+%qKMw-NI?7ۻw Ku[MD=Ê62fqdrxv{/ĠozTv }wƵ Wy`;a&:Qߝ?[\3KL5s9{9hYyTq -;3]le8" Pk(lf^ rDyGP5iP9n@O/,Cyؽn_! ~i˙0s\<1}c*&xX `@T| IM, b 5JC I¹Hc+-+?@Y<(DpEo~@c> Áks鐄PV $)%cwA_UXNBD;FR t^K~@B$ Y3dIÏhku8֊PEg1WOI*2MIu!AIA 2) 0tM y-PqIf)״!O ݃iiSmKY}Z3JVPr bǼ<B5[x#\҉u)s3c]PXa<XrfbAϊx!-\7UK/X:"t3?ʧ>py$5eY _ilxy5+MdZ)'QU-i,D+Lls1E!(RCZRA)tT|XSmw 2@yǤSKRmU+dV^]cJX*7nFG}3/us;ܻzud̥^re9L$q c * /έk݉cfЁ33SSszKysβ69 'Tob51M&V޳g~nvd3>!"W~Er,u#+c ~ǎJq\ Ͻ޾7xϱY)!wE GaXtAO'6)dnFl8 >-"\5`jyV[|!}7CoКF׍zzϷ44҃v/-Ռ5mbi>!fS˚`l\;ww}jE*Qg|=5O}7*+@;XI>J! ley.̠UTVuu5YaѨ/܎w{[5{NWWGKi6pN#2ʷA w>~VnD<[ j ':[ʘWYb'_',q@4S; 3tp`^A'{;@\Ӂgc%ʙE$wdךQC0e,Te8Y7 M@'!oZPZIV%E@-( =YPTi8/FY][:T(":V0<'ߩ8I(\LW,m G|B ɘ"]_(*={Y%D~h>&bG*XÂV 2Ej|3fBs`b& -STJ΁ ;)\%~di|`,]Sg d7__0>2>/#ßрҾkيêuχcjErϬ 6E8CMBܽvkc?nd]3v#pk5ŋdքH,<̎Gq4Y6>1gOqwk3&''9Sl!WN! r=tᵥʆ0]^.DBiv %$m5395k烙"7E9~ٯF{3;0U&Na ㄹ7/3G9%Dc2Qx4y9Is-ʸ(8M' XM\gU+8@Ĭ/Szs 65e9JQЩr4ɨ"5?;j`k$)pbjyJHKPav] Z^yq-&>OŪil nY/7!2=$†Tf5bhXa@)M̀(ڳ$d 0F-BXMFl̤,lRt2)Z[)jDʙ?Riq"0J&Dr_j$^dW#A]ze{/uC_zɥw51Qf<>1Ƣ;FT' uW гA<(>xDǶj[nPY2F΁pl8M^YS6Fwd7ɎWͿ]5u*$㜬,Žk.!W#|4 rcd@.nk]{mUv6iǹ~Ĵwwѹ@|ځO--Wg?)ٍTmC/|lFZ3ޱZKVVFz95?o_6Mn 1t@<^Zxȃ uRCVn?[P Uz_uySopB7g Z_|^bƿ/w4Ӻ2ƾq?" jr5kbȘ~W`6ix8Qd`]J_XM5+m<JɈ3ˁ` Cry@iLn@p4kŔmq zĘb/$j]| ABt2Ä&`L c,Hٞ.|TDf,Xc=xi?'> ٜXJDoU+ZyEߠ;^ WPܟZW? 
"]9j{>9KYX#W HE*g brec32qo|yF z:xθYs~ Ufp< xçv1VbCdZq<ʞR~{/UP J}rMEO ))/1 mp7: ԙdpp "uCq_A+PVC{k{.K4tH ̕f76#h BԲ@*S/1WTI xfͷyo6xY# ҄&bT/[Pj*ؘ @0ALZD!B*H"qR!EYR~WZyyu+bG[}?nA毛v- qfgɶ)vy ?wW9Y̒#BW=9ͽz{0dldt [s~p[TS<'߸Iϊ򘽳::v^k#7%wc=ыXAijXZ\\9{ƢA*[0jrӡ)ivu;wp㏜tIp3nF.l1~\vn} %pj%\Ik HC`t՚1ّQWh,?I S?W=/*om`Wli & RP 5\i 7e@/5jm(0H"Eu%lBT|2'h+LivUwx7M8}iص㓜/[ fk6;CN_6/ЭOmv]z/>x0;z3y1;Pd}}.袹 Flܚ?6n쫿'Zl)")9=?qr)Wcع|ݻޱݤP1ʯpΝ"B<|uIޗ /ӝtƳHWssS욍I} 2:xm(iwn~hƭ&K~knl/|uv?L:R{W/b{̥+~r#ޒ1Jm O)sd-$y.rz0y\ɫ c֌ N #po[&-j1s/^{}+y~XY?鯌YJ`[sA?+d1DzYmǗ'-\뛿ʤ$58LTG|DRkH3N a4 .w+ 9wȸ9Qr,Ѐs0Y?ì3%!C,Ӝfzd͌EL dPjxQ-mR@A(RFZ @eIMhE*TN!`N c9+|2y?n$zh43xNEɑbBzI!`T'{Ȑ'R)EZѷn)QVUHF DcI> ?ˏWkFA'YE y8 5όR$#ߦgi_ )(#Gi؃8[ 5tilVX*C mͧd=Dc-Fv'|J vB~%/}A1SmTHpsÈ֦OC ԃ…/leZ¨e祙J:iRk$ѠM-V5eLĮi+&\{SOr1 <.[knK'xiiLd5GκFy1`;z2n߯ؠrd5|,x뫫o.鏧8߻Qm$8 Os#\U>]CVg.:=۩W˂ɇMދFr(+wh{*wzy#;n)o]r׾h ̟60B~oe[7z|w)[ގ=:vּ%OՆ'pumwjf'N5ֳ\CmjNvr :ysshۊc|Oƅ0>(X_靉_91_* 9t{GrXk^WIj'f z0^ϑ O3mܡeHR0n̉X;CO$`B"GX%+?CήIpSGwr 8??LvfAY@EX: Q:iQ^IU~2L2 ^0QgZPF`Ar{ւEioxj=C)@4<1 Lq[ O]$bO>0cItͧŦ%w:NEEp-69OaAelj&e9TEɄDok>.(E^ԏm|qH~QwջWt0.3 ķ F'/79 9+= ._4ޅot| _nvƋSVSq=Y4s=uիMN=FN-ZQ>փƼy,>df3Xuq;%znd lAc,zЕpx|()r0*@ Y/s@ui]L"DT-<Fp#pU%-;Ts%C"Q| ©~"xSӯO.D$"Aap1+'H ' Z VB 8*No,¦L< H RJn>z*b~ 87oPl(d2 XiT, zf<԰ HY N+/[)'>ij~UGv`O|ޛɳN<>#ü=߶}>)'^w~i~U6G Wt߂C?zu?}?}oN$YؙuԾ{:`ky)i~<Þ-q"O1P?MJvxwHqpFS'>{DzϻoortGꟽ{cyB/mvo^{$NExKQyWN'?ǧ6տ}_ ?-ƛW2XmEɶmq 0%ب4~WC/^{v}G/}Dm#$Ws@g)3?FDp52 kb9]Sovub%oaGc弿sq"zOjP]?A)E|mx]L3FF{53wwx)F<'7f[`WݕOR i Frʵ\%Ȉb>%WD5Z9Q\'6?KdzlΌldDDB{$Jp1򟼛I'KIKͨʥ>p,jR_uhz+]'j0sPc-3_RIKD$j91@pNV#WT$L)2˜;}tX"awHV$ R0EɹEi(Va9MQ?U՚\bS"RjP $/|GWA- =2+;:'WPmlK@&·/zaUNbH=wV*p83 Xi (TT{ˣT 4Wg1(?M6-c+>kߺ{oQ{)-U^'j+e;}̓~uSzӱW̏{}~W(uIGv6ӅE/g& XX813>Cw{[w*p,Z6$UNCq%Zs5/Xq+jfDϟu6F2#mfgn;ٸ?{߭yhڸڳopm)WSSG|[7ywh+DD ?n{]_={3WuX_5P=WdtL-:׃3S9G.3wga փm=փKi3W$H=aS3YgNV[s]fHrL&uVyYylLB"6&HT$g=nZ0Wf(M 1yB?npGp^>r`lOdee \J0!jR eUęgm9*يEs,ɦ? 
2 5[ 47ʟ)R]`T)Ĵ:JRUGY/9It_ʩ".0k?uTYL'PRּҾ%YegVyVLq;<+%iE9GU#JY~[roS-Ug9ߺѡ(.u  2C@q"eÛ h")id̄/R_v5 5]GKhT5,ӈ'mHv€O3/1U8ɮ&i3O-Y vΤ a.μQRJ<S ꭲ@ º%?S%f& jYҿ2(݇΢UKz#VU 4S֜_a]cux69 !3i gFv@{ѽSݣdYm4|jdF~'}RWv\md-9qCFͿ9xu lIı:t 30zi;mdD&zI/L':gWz1t5>Qxdo{5tdXG'c=ohփtI[[L&Yb實˙/M0Gqt~,9Pf323iǩf *5s4EN+6!"NH)V&_ɕTAڸ0yZ&!A2*PѢ27`:"̺)&PLG)pd$(,V9M'AYRT {(+A&,<+PO6kkb=ڔvBXTh[4)q UQ͊xSm>)1%Դ%V /2t7x~*1al5&ŕh"HBőt#֜ÀO +HJqU_Ȅ} KN,_^3HiGjD LC%JOCPl?=_V&RIV( ?h(ԤR%bNgG OS[gs!5*4soBicH H: A&n-hbDrU"seFġėv*ӓj3Md$,]({R6BE"'HjE_es7۳ѣwھcǖ[I~>glٲk;e.[s.Pűcxt ݧ?]Ch󿨻pkdAd4cg[;wME 3Ottޯ⇮& 7$~˭Fx}wܹ}Vvǎ]ow?;y;JuYw}\F>AU߿_p=/ku$x_ ɻ .x-#B4b1kvu0d‽Ol֧'jƧ ~׮$P:ܷw];w'uORBW [5h~1j$A<2?{_y?s[2 g2 zż3[nrsl,sNYWShMEbW.M%F :"*nI>E,!׈dvm3'kGN!&e t[XUpxII'djH/(57@}3D5RNV..z&,OcG"MZ+v,vT&Mp?O yYu5i' Ew"TRI(d@a|HQi ^ nԓ3@R)h4?Kar$F!"N?>|"B{~Oiẝޡa&♊@@a,bB)EhzIH Gh2)%AimQEk1F&KQd+^f|kͩpjiŖĖrEa4H 4ֈYDIfdn@TR6 ^H@΀5P"g_ORW;im^8pÞ='`^ضu^`j`~?p @?1x*N8yɓ۷QQL4N/ 3:R'"B N!ڣ$mvnnv8cyڳ՚dOk94MHyMÉ `ܶ!1&;z}k'.Ȏmݺ) :r-]|{k`MlSXA~Ãd4Cq$}Az+=AeZCYx쩩W<>Gl +K /ٌBm=փX C8c3,zɕh*J|ɍW@`> Jz4d/ree|*pBn!9vtTt=@ꋰUBk* IOD`$"0T6J[?`u0m $-= SC ]Fla4x$ӫ kqҐQTM@%I Lbe D ʨeەRJ/XQEt/8Sj6P1_5Ucp3:;XdX;~c絇ɝ|3i[.4CRzhǾbZ\:v4+Uܕ8tXdV!@ Ws}n?XaDOI۷r7큓ǎϑ_Ӕ#O,1ȘjaHJ=hGݾ޶-L& n=Hin073T V3Ž={nvUHl&|M{!2 3׷;g~ꓘ#nPfMH0;c솒2xVCNv>|C#mg¸Y2q%bp{>ao/g͟bq{b5ta3V4.q XbSfҶD.Qzp_aztJ5qsff\f(f}_i 31ܭqMt 4g o z)dym,i {fV@W!n$2?*/)/#۩8rJb?Ta‡U,W)1Yg *R} 8A&2mFCBS&eQB]J/)5> "i J`䕈 腾bqɩY/&Qt,V.q Na;M68R`){݆Sulr@@qz0pBbB5f3 CVxZCj0UX,+QE&@&TBF&B8iQ]_Sfpƍvj 4F6WnD[fO4v' N| }{##Cn˪OfzlMR* J׬!sz0Ǽx'+frP{-\u'>}jMqm+u\݌׍7pcѣGu Ofx|ӟu]wYl72˓X4)ٷw/o:ۺ}̀;i|{_bi[Щq { >ɗ^O^1!-%][Pޣn6T] tw> d_-nI/M:257H_$z(C$dEut= j00o` W~L٥ /-ohin@3$[o݆ķDeˋKG?I/)k41=a؏Ce؍{_?u+>Q&Jx,;Ov49v()O}{;r"#n!;Fvq]bt{rc[БQ*/$?`'F%6G׀ͥD$[yvt2'p VYU r9M|%wquXq1x%)p?__Qփ m=MHaN.ztJXH9]3 ~N[ D'&/\=2"9E a]GX7O'ӄ{euO#ploq>w0'~5S_9De7>)9NVj)k<)8ؗsźS:p}G/Xhb2"G<~:2<ʣ…40T7rt2FW$`*ϯ=)֬Jcu! HJI:d-MN$6DކB65)ڗPokQ0¤J \~ waO{LTV0X4 H ‰|p: Ρb TLy,3b^ѣzK g&|_G IYJD^M%3je#ȍ[c-eYCcׄ,Pe4(ˀT%-mBEAAӫ/#9{^2Ŧ2uP_TSƯYWne8h>_;䯖7?z( /2[xZܜa8ypUc9Lsy ̯ƅ1~YTs7e-IQ㋿Z9K>푍xa'vq(;t-No:8?ύS2xQռOO-=ojvdxX HT=ܿ&d}Dayqy,o^Y3iY~T~w>P7ٞ,ϼ#[* gk.S*Za|qyO@b?Y$1>1:X]{z  $1=85ի'߹};ʜHɝ\8r} FGtmH;G8mWP*8\Iu{.vNQ/"H04TDÀ;m89DNXk+ W|lmDij"7®"-dݞkzSщbpQ򿌬ކjh24q˙japlal&9BnLPERT\)*kP0Qp>Q&ȄS<{tNY^>Ȝgev)wt"\C8 dV!Z|]=sc=hػ̔0?}c1/߮]x:Hcl#nz+"\U XSSn[8Me(EeԖ&ᾝShʐc %LDeȪӄ<$b3ODq19e J&@/dG 29sbVJxO)] F~rY'p)ֈzU$$ }b=*Rv >Y ҃ /5XC]`Iip%L4d|jx2V2a\ި$^4C:S%ExZʢB\JƊh\$̦gd98)|D-6LP 0tr.eƧI4)0es=LHs>aC]Mh/ȣt$5 VHL3#!U䄄ce3ฒ h2p LJ0 A0 QtE *+ R"tvH4IkdM۠RmŖ P$+){`7j oV2$g OqTrh}4>毴JX2fn (d]BFC_YΩD=NG?;~* YhK@h'vE,Mmw=ه diB_$WZNMdT<;R]xw"ʞ b"`(DEn~$ gO7SSL.Dzaf-`%bZSd{%AA KGfn\ܬyeL1$1m>IYd-f_ r"ey-[+[C(}ɺ]]FG}26+eɼ pq&4ihefq[p3|cvؙ^QL-wdQ0;(ԡWi{8iE<<ť32C @,5T5 FO&ﰅ!L4 +ژ1|ܠ"+ctALX ?a ʿD̞;(i CpZDJ@ ?URi"xu>TLVϼ*4A OgEX,R6h)c!=%}ǗĨ :2" ہOԩ7Mf0sRfE6Ȩx[~0V@J"X#H#O-ۥ\) Y4)b&QVt1ϔ8̒ ӃV ~#ڈ&U(C HuawA"7WH0d~S7Εz!II:+/6*7%)1*5izHef9N>J V[*g/2ȱMTΐ l4WZ@n 7jqW[̅J\L"dB4Dr e(v毚j*2#+^ln,ȍqx>kA #H8 us&R*6)d 'ΉgJNo?)3L[x2%j+x玫gvae 2YIr&\K??[1FrKoħ).t8p4W4Op;[4B?@IDATCCe*D 8!3ܢU&^JrLȁfH̓ f5! ,c H"pzg_>hpxMCK CEnA=3:L(d}S}h :_ZupiU9w3 [uq8DW|߼|x׸U[o`pփX=T[:tփ #B\'S: +'q*7dόg|}Hxs2A)XK+Rܛ R)E"mC j})Op%t(҄܎4s:(؟E5 DĂ D&Ws̴ MO<i璂jlA1 Z!h (Q"B 4 ZߑjK#Jw`Kp {Q!}8$0K2 :-+P#(\TQřQ;h: &HxI irM ODuì]WҀ\JK,-^Yָ\eli픜eF}B+:TQXpRwPMA J(É$ t҆"6vM-Y`~dKrN*T\sK\sń *b_{(R1DdK&¢Jr4Ŏ(ŝj\v\htxqL";o{<(Nq`a\.3 &<4R!XZD K5U|9_1XsX.L/ Faǚ1bXRqPǡ 0owasqDCc3XO 45JB-39 S zLhLGA0)!4I w P!h @2X\$ܐ_8@d 2]Bmը%5:{nM_1  0 )39UTWFK__8=3Ͻ P@?f~hK8I_ܧATH΅76odz@)&eMH"Ȉ"Q@i/IY^[0yR~InTZ8U%iDfh/=z%COЇ htjBBdB~h1hHIr%/:]N B~v*^#94WixZLlt "M! 
Wik15{f&Tm9]+mC _&Ah]QTb0iB!kI٪:xwkD%+xi].2L`T]0"KI33}43 (Kyd"=[7WkXq@ \W BsT4 LD6SWZQ3Aiqgc[#x8*' g>$kqw/5SC8҉BOzP?pQyR>Ց59hU^ot)0&Cy-_ڰV:2)p:ZbݙnO)a[oP?8N,rpQF 4X5 ><!.(0 c%S*_8JX iO3F% 7=xuEh'C`W^9-i@[%1XS ֳ7򏂃XoH_aD/ 1? BOK^%) ^ –)trPe @wdW|ޣ=.-l)㚟B!W|:LaLNd7Fˠ(Q2y;0ME𣭙(ΩO=ϰ2bTTu#8V$0Oc"(k[nAHsN]hTi{T )ʨzL=ϟ/K 陁)#IXUT|jԌ|)ONl1/p2X$)FI tPIVavQWv՝WW c7ռ^X1+x'9yw ^r0kZU e/yc5WM[vX y@lr[qxzE)+aNNa*~Od2vX <,^y' PW5H'`I&=ԈJHmc>iV[u8;48/63u*@uQ'/5s{5'$KeʣLN9r 7c| UyJۋ^W^Bm$,@NN33%Yq uqh˲]!*$h7NXLv!y~%vW%5 w(A N͜BbFϼ yT:A^,Ä!.W̍C;0jDdB &I?jWaIUHPڻnt4S#4[po?Vt* I p##Ħ{ !Fݽg1Wtsz|*d'V(bgnQLzify2!XT}hU"A,pn;݉?r.Cgc>x5WG%t0p7t|2%| @5sq7ʃd[ J-'jP"NRMskAYDoj L1:gĸʼ6WLרxcNNŹ3] 6z*ӴzF\^`ӖACQ5*($C-UnZ@M|X6u45 rn=# Vp\%D\~=! J?@FPH+ DQgp9eThjS)O"$>(}YaB[r4)\ؒaiՈ{;ܕlֆV*6rGjE# Ȕ&qYǎ ,9XC-4@F5a'koC G_5uh_6ΞʌH$@ķ=R~Zaɢxc.WtǸeL3ͷe.rCB`F|hڬH[R u'c!C/"k{1a"O\x- 06'0ϧ+SfZ^ҔL㎤ʑGt<{vieU|&NTy!%~,;_5ôLUM˔Z!Y 6(@FЭyQ>$I졪QV5.65-$UD17r!E8p+$mH!,CPl]=Z݅5͍J>0xL!h=WAp(Vuo/(D|EFIӓd$"Tx5#2ZҩuEv(X,dhUyp/ݟ qisy "WCӞ94(*8F@@<99xe`puvMvaFZ j'q2㥤G3n=2Wr_Ŷ+iFUyx3ǰ#K˿+փx03l_ ޤL8P uzE @pȈ0P⼭tʃof_Rm=m1#hSaQ[0֜Ѝ!QĘ$}2,Y805 Vx\o;?҇ji),hӒ΅!`NpfHKF}ArŸꞹԄ?f9 :ў`BG%rHCXBKVaJ%vʍ9JDq0*-~1"%Pt([Q÷0N%cwifN l:yB#vA,' A2h6 Y{!E@"mtAyv2I1D=Y͊3\h%گ_R>{5(T~6ժ3Ã) yACgK H&O8#g)wLoh 8Q:f;XǕE4vȚ ?[Nf; dzFKjGl˕~1^@kxG~| Ds>S $ƒX$ VsplP~>2jD7Qs60hz{V >?hh._1$kgRp KtnRpˉB8J %RB w*RJ)w=Qx~y!{_*M0'> d85bLg7RP$ UNGGrZa ˢJ .`!] MFKQ3X-ݙRq¾ :bzlSZUMXә3cc F 2IAҦR] ԈZ"O Os$/h.2Z6L ќ@N"LAx!=2 a%$ݨ1+p_Xyx2G2q ,JrdHUDƌwmkkoU<1;ϝ=da龶TXvTSLm=`g&'㬊]JV.˰njPc%k,cRիA3L^D ϝ8O)r)\@BG!S*xʤVys(FR.a3 Wl$ ^!\CXOk,%Q'h@~JGYyJBP44ej=nJ$-:@jJ`EFYp˞QI#wRU.מFvѽPiOօ.2Oķ}o^fmƥfvr&SŅ䀁#(]xQ^QVU❒ 4"f D, F|M9lGD B}LG1Q %ˆ,I[5J\4+ 8sˀPohBq ;**/MӷR$]BIN̾e)>H~{!|g<S[Dl.h$MU9 hiWhl< _s=|3 z#ϘL?=5`D_IVp J ?ͺI%h̀@ J sE]:X~P:J_ԭpΕ5XNlIxl_se<,6*^@U@ޫq)!Qky> Ij3iv9EJz>{y+U$GƩ4 œdգ"n_ɔQͧu!uC=d|(L:Z<oF3T:dj"Җ7 WϞ/Dn_ AӧW+B P޲geqgjԡLPAikN++hUeql4!Mh#Qpss0IϖccYmo>7323cHv@(\Kƚ%Jփ5mAtփg/r~ Mǂ),fL`ÞeKj&@ |X Us~-K60$6,X m#2G 4&_qTӳ|ұԀs'/d. 
#¥-HtpA#$uI2 AfJ1f*DᄚWSk4H)r C6!)E5RӥFPd9JpW2Dha)l*fwTr$?C Zn0觑?Dxrk7N4 %et]ϰЍ2Ja_Q$4z0qOpyI=_]W3hhM3.-ԓёMdU0d9{)ID^&pr }uNX'2% _DX<3`Hرgжb=Pf yz ЁpCiVZ=f@F<7ځv"񤒭K>Gzzzj*UW_鳝tzr0VSmJ`U@-rddVnUZ|eJ==}!}jޗSdiփm=8 ڹKʌ3xs6@1\ ;_XIS⩽Z;`.wKx3U)m XJh-#W=֏PIr>!2NBȠJc8` \؛J*&MUY@ɸE̞ʐtbܠzo/!2 -2#zhG+Äb0E<5f,rc;$R]_uet*S4$#[`6!d 8ͦeQL&P˧(RJ~LR^!?ۖ-)DGeK< 0C: Idϭ&dCUr|{ w?7:!z7Iar /L1DCb=UzQ*)d$#F[M DA\iF-#$߾%j2BFpPBQ<IN8jh8|r2ehԚR_5UCWzE@2Iʉ5sk&_U\f?b_oa^76:I .>tfg^PcgFBT%~PEXS@m=PK[j .Q{@\;CʇpN@IC-k`.3_@>ܹzTӴ^AY1-@&7/q '#'&K\G&ʪ\`⧚J쳐6@L;+߂ vyGxh|`T`'qh!VcDHՉdzJ&aΰ"G $Uc"h:)`%B%5C"h՚S2$2TLpӜ &I“B'I$"iv`L} Uf{IH y, a"5 '⦆ HҴʖ)@L7ڑUDuls dC:DR(I v<17;ɌI+H(L@4GtIð#«tdW3 jΚj o|*kh~iW|WNA}uy8m=/z`-\߭W>oԑzW D Z;3lZ -hXil$uf`|'~`RG(E]I&ѱD.hHjC:WV+:Ite)RJ*"^ ֟h`D|/Ur@w0y H*)9y khDs|&3/rkM8#5)v)Z6]J`=7Ň{;b0r)${Z-jD рQtEɟ .hc"pƅɡ T RTzVUlCKSBFb݇eluRf *, b$p#'`2)D#;|JK lNjLm $;|amR4-O -C#pj0Y<WD4NgCc<\[B\ЊT2 ;D.p(67+OK~-Fvz: %C%(+OK1( Gbt6Hl+Ghkϙ ox]?2(+-jU2Gk_ݭփco}\z7_pHzGjA&H0m=h$/ #a"JϨz0&}4h-i5̗ړ k1##I,i!zW3W^HDZ]s5PANF*~*e=WMrȨoyZ)"krTR"shA拦j*"-T#j}<<;*1Tf[4-=ö)Ïd⏺.'J,:ɲ\'-h^:jhA΢1f*-LnYm$8t_\d]}z/D)FoXV1a^AoiVCRIYripRwFc)"*)h+;15ϋ:g#[d7)Or&O +IO Lu'8{SHK1Dm*=8 OVJdIbiv; }0 & 0"rU(KaTиB<Ź Z)z ȑ \jhr믬I%9dU]-jUODQx{tuE>ңtv8; ,yqdm=jJ+iW.NksjY"gbdfSseCRgr^|fe㔝 bua!u3,IXBC`p>\<(3&"U1Q b Jv`m,ۭDa 5T Ʃ'3.|0\pAlkQhI4m9[mwe19J ?R{l˯ KKS?J0VV%K+P2Z x c DL,e/$E;͍w@Phl%1M] j' 78f6A(Y(GRD;Z)0Y::LNZĤ߸ "Ռ^+B!J9Y#Ʈ1Y KR |8B2|Eٴ$g/\_Tn8C4bIdcVή4!ə)&M_զ`b5@,Ė>^[U7:bd*:`)5~.b `&ky55#FLxJorWv/bTW_5oO r>.- [e,,,/<9nEg02:Qs޲WYhJ2-j,"gfe@,pc[_;Nyfփ.WjMBփm=HEewޮt7C:v3vk^o!pIp9_J1H CR XorRAlH`eϙASs:B׉Ej18yQuAlr B)J0ٌt /_XB\gKD@Fm樵2` oq Z'ͩ.uZ"Ѵ-R9GuB(XW+Df+ިEhPʐ2ÇiTF^ 2Ԟ dº69!k<|.5 ||#}OMRC 4@a2J(؞ `O8LpZB[$*rJ.\dqfB;?R5ʱ8?ʸkZ>q}Ƌ3{Ez36S.nTՇnLփkk[b+enr212o03ÐM( l ~Z&& uS7 :‚CV-:f@R#DOXP`*Ǡ "HrH:P)dIeo`&fM0$M\F8H5YaΦ?LmNHDpG).[lճIS-j A)ox'q R g E _"r4 zZr"2ʡ2#`K: I s5ͫ{zOUo@j؆\%yXA0mHI:KBjfz 5ɕα<vIX,UdutTY( ]6H"R^ 4 "5t5pvwSKe0I$H@TFo1 ౒+t$yDB)hKzYd]Xd+>Cӓb#-`iW(nV0UƊA"ll*h*l=x_ZxW ? r_zK}{P/ ˌG+Ր+cLX#L׃cm\u{;rKM 6`,S7rTVUW_@Y{+<Gs\N%SoYun7Q Y8e 򶲔dLkۧΏp8+z4.e{S OQVQ@ⰲO͵HO6ō\I7%g~SxRaՌMujlM8]B u, 1O+~$bc/(Ea ʊ9$+hӺR$~+u.R I՝ᏺ~ Y:"aӖbH IJjr(S-4^)/3cOԪd}i4(p]F0H-x˵(0iCOh I )TM)U+d(`VS$UN &(treO}̜8,rcޭɶ:̽IhDq1~tDtDSCGe9rvT~hG#.H<}X;r!2s9ck x4oK MMm?0+u 8KNtNNe`.˛ e ~CHp(6#.U`ۓ;NcW[jYJgYLu+8Wݓ_'~c/_x <~Q V|T) YD+$eWƊsNk ;D5Dmk=h/Y{y|qc'Ov#]Ob~ѸMGփյ\W_V^ٶ(NKRq %,žk6&1ΘҘ{7&tjb@'%kKuV"njPM.P|"nF'1\Ip'Wksj1._N1^.ZcCf/hCs1eY1  M{NSeL EVr[!St)/t)h22-h9w@AfP4#*ۣdRZg$0BIAWltIt G2Akl$s([JZYЗ(~u*11g`ZŽIپ#<ɍRzޓ\k㲜 ؿ7D,Zŕ"ЕlegCVbӬirDC)AF!>$pr2j?Cdl cp@*}vЀI*:(^0c=mc^߃M|kCWx+;Xѵa+[/b&8y(Sӏ3_~+<E#̇] ˉ$J_V|E0K]uAg/}ŮgN?uz7clzиo2iq j9U`Fv=9wC#v]NJDt.V$ *kp Y:J[ gʼn}N];,{ގ7'U K$T cI&=㧅ȉA %y&CØ(A23FBjTEO#R '/;%&mT`H։3wکa*pQ ,7bk LKZY:eZɩ8(J6!8ea1=@s<#`p34 hdPT 8&r&RDep"#m~@BJRȯ @ƒBUk -Gu#m;3ߤKk󘫠%HH>I;׎;ՒO[q48=@?#0T6~ߡJ;4i &H8rԮ$]*}Y}}G ^ 'U@ǘb@p$ZQQ\&8UNԲx[ ׊4|_C^jo`ml]<Ǟstvىp;=pJ&fgFuUzW`S^+ ۭjW k=HA?[Ir#p蚨3+)%!:{۝V_\[ *ՇM!Wk=\ajnC'.'#vڜtn-tɯ/l?t ~Zl`y$$u(\+ ?*]8-#E7@ " jRх %,ܩ<*5JuۛWQ(}$@1Mi|"vt&D>O:V2 lWrFz>3G4'mr8t0妧䋚A9\)m||AH/l7@S]KGlRu/m Х e51D E&aXj*ETؗ)4^$VhYCsVCUNZSo( QHS螈Ps]1y ""@VʕZ^aJ)2\CabO' >\ I[\BIYM"-z}[^Hu]`dJXx cݚ5:/1|O) sv1B^!8 F?@z1vVRS7eb7& /YP2!FX@dKJel/s ֘Z`*"HyoD*a)DMPTTQ@K$y!elҤj&~>x)̂HvV wqϕ2;`Redl+Vi+ľ ԉ bNi!m45dR;C5TyRѐWRt#}PEʢe) &K[8#F+9׉Ya@IDATc P>kmӽ4{"lD\O"c5^%D ڤwBL@L ~Dz/'H^ Ӝc1 PCYj寖? 
z||C\«x x>p9v?~iHz]f鼳&`'iC_0DC›(~-ʯ{^o==Ǖ_?q=n->0&_#s'jRv>z[ij<+]ML5 qKXr:]--<8w\S2AkaI9jafH/9hPG%F A/BN`3nJCTҝ,WHIpB-"pE'-zA JLPdƆ_Es~Ew¹&ńf"$MXcEYjHyƟF'fI< '!;m"lCkpjuŞ!<,#(Ncm"Y ĘJ%'i;RZGי]*^@TX+Rby:#GO\$y.L=ڔxC'-7ĽRħ9GQ# h1't)>=`a8>>oVvԒkZ];!t[3SP', Ҧ43 P:m=FSd-@2J2Q6]GY\5{lňP֞= %ʸW$;_-U/xW#^wS {oo~Ýɿ?C7!!"_jW+k=h:ޅF^t_~3yŎk>zķZAW_k=փCheSxŭ]ۅo 73>(L`Qʝ)+6LK&be=|7' “Png",ڄˑ>Ƿ#<ƊF!a\ Su@dF;_VDX8Úʔ G yB: !GL=Zwe@8 pHU$'|gǏ"'+Đ-eFRQ9xM͖DD_.'ZlJb/xʊj%!Ǩ-Į;I]ʚ[ ?e ۚeNw?[j4D.M$i6BJAIRI ʣmt#9t,;VL4@23V)<^ŦHS(%ͧC#Q!Y7UC|2B l82#=%b@L=\aKSTYSYۇ6}(?ҝD)GtP 'xnv3BO4tZ@VZdd#2jX·޴Zj+=W_asnsu+^Tl=x&ɞ/<9y'{./4%_ٷY8cǼaYiWNbв+ cqF)FҌʿg1k.dάXI#{RoW|ۼ^Z'փk=փy7zpۏn[18cEҍHFybI%Ɓ't88̬%?i'"`ZbCrE#S<-FD#"澢M:Ds4 $6V) ƫȑ+.'Qm2!kcK&TN<*R/Xh()y7WT sA&ЎfT:UOpM|s(+4U8Hrw,Wh\Rׅ=DIv(!{_h*ds*cT ! /~l)Tg*6챚}ne5 }d;:]kv4a BByj^76L vDQqpBtCTd&Хϙ* C!d@y/DH7. K- _7qGV?t]F  ZW +8O7:C엿Vc'dCvv/U>Đ-_S0i892q,v(B1Վ'>lۍ`, Terc@%#$h(^<N ..#_*#=Aa N-U=:޺RCGW͈فяCQmTٛ_-U卌[YtS3{p|׎Ϯ/ k;y61gg3h3f_jWΌ΍| lk ̊ɿ/=Nϛ {21Zn gA~ Xx"itt~WGoR !켳7BL؋(ʯǡ` ?}0Y$s%YN%xRBgYn<+,YAK 0 A'&z"|(*#4*|Y1OJnn"P4Qjd@>X.5Y djDy 64Л!Y~ކ@0ħXQ(Q\R\fiJ'q9Y# G@<-b& †aw) oJQmAIAHq!](sITL4Jlm MGe'(Vsu $fxC_t)'9,DBtN&2ӹ[뭚2xf]OR : 4B ^~y:%" G} XOx,XU#[&b\*m>$IP84J3R%mG CZn+s {=ҎQƒE^Js-U^VoC8LJ_vNoFGe'ל7&XM<9n+3"N.Ѳ8ȂB-v/1!Xl\to/1dDNޤ - tH3OA89kQ$ $KrQ*:R@]'Uj)5 .}5%ӊ&8 `bzB+s;EhD)* a8rLTms*e6sq ʇ8R@'qMku:Bʊr8iSبw@NŹ]f̸Y%}?@$Ct׾*M P#qu^k4SMTs>ʖĨk 2h!!%'Kr4q$.uM͕2q`ʪF@|ʐ6eylqWJ ɴ=-ؒDAy-È =TiЩq?p }ouۖ-y(Cs@qK6Sc6]A=Z:~jwGE tǭG<8J&e/Љp0כfCO4:܎k ОBi_A]IufX,q=YZW &%J, aoYTnFEDQjE,slYL-TM\%_YvZE$*q (ī(SM` b"`#m2r$:dWeK".'i-}80|K$I *8<",MEҒuĢӍ ņxFT)I"Z&ig2Ȏyi>ʵ/dDGBeZbTTTO DI4T)2 p2c{ck*97V&w&W*B;eW\LVF39NjRDik‚ȁv9 O<@J$H/$ m$.e2GMHdXm+lxZA_jJ9寘8:z_΄nų+f\]Qao,\&}w_q_1:j}kSksAGA=k T*Pٙ!ht(LIX%4Х^. չ5sЫFǯeD9? N_Rł#%7X1c9k2+ kk87U| j(Ρ<~ð"ɇ>0;dmZM{%JъGLJqIFIǍRˈ۸YVJJnk8,R8OdGhݐ=HW寖3_qz`ݯ#dܼ['8\54C,?f8me0 WOx}^xMq(a_}Iw+C_C&#gz0N\vuo( 2,2r-?mrAWFeESaٙ/Ia%xEdIT)IDVA9(zDB &yswd˔ 0yR!՚ IkP qPF.L@PXaHNVR #Ո$Fď~w燹%MHգXVa20ֲ;o$PnK2W1+CIuZN=pwݦ?B 1)Hp2A@c{B1̔q"C@c51!z(zZn/C7l91 _91!rWMY_!p݅xS9η(G7~>=bZ-9D 2nM6HmORV/ PrWpf*Em_;HY~挆c=3c&B>[9kŘ;|P#׭o7X@pW qS0Zh`ݕL /ʚ n& 7Cd3Z@\;(.i)RQ*OL$( stan2;FM0یڌ`mGwuoY+>Wݩ[Jx'yJ $4ĵ;T6,d-Yd6 7Fǖ)LrsHqk92Koذ!@a+nOXYjmOs4U8LtS2U|忚n<[ۏ?{_׿ٛɊ&_M_1~V|e\);k=蚕7q_\ҿ{};){xߛo|cٲ]lsu66qzlBk=hǨklurWۏ=RuuK+))p <Ą5}:h(_ǛzL&;q9eRyJINX2,g$<2G"{n,v_PC $/(PjՎ@$Ԩ4hϤ"g ~IQNHe3mhE8;nݢ$@k1FzޚI &LCd2%O27!$ac6ؙCRkT~Rh&X Ґ‚;@94`,i!ԲYTL"uGb?ƓagWJhK! FJ'!6DZy#R$VEG^-0V!NV}6 r(Ʊ\ŕ #03zD-DT ckYcO.Q*Kz{w?i+VhIr^j+:ӎUoY1`s0F_8wW|tl44t=xrT^'~ᆪW sh 1~_V|+Zq A.S5&W^Y.^IWSN.uDuZz#j֢ N§_nݺݒwV6~ VQT@@00?co6Cqr0ْToW,vqe c ,aN :2vnąWH4&MUۉ9ԭJT F4B"!L"K >o8R4:57 ?Q \B@p͖EI/HQX RLBM:pb觡E 8GZSŊEUE9@X>Ū?ݚ7`ɴPvH#)TkMaT(P9| `L+ ;M[HV#(BH?fwk+V#^ @}ϓlAI˶?kPf=E/1YdUF+8֋}B 4%7HĠvLr p@4!O]CV큤> Oʠdz ,R-SV"'S1{LZTXT*( RƭT8Q寖O4*3 Eb_-CbWD3_80|S_tu1S$v{1_p|t#.~S/Alj Lǹ)n+fr;`}jWv?FGs^+$SwCxڡ`kP=QׄF+_ sXqz+=N _[L;Ü7 O|5$eRsc(H#sYJHͯ:^`=Oݳ,G䬽WhMsկ #J+SWF.f+=cDVzиu/y~y'IGCWCIx RS8N0 LTF(vֵ QH c,h:8m2nOfO\ؚ}/TEabWR[#6N5:$R%G{&ljR1Ql j~+Wz ]#G`W_%zS+uM[@Ѩk Ir9n<zvUp'|³|l֦ʂ_mGŠ+WzhؙZOkϭ?ctÙl&V|+:W_"3@g/+WȽ]9==8:ekebj=ZzɈ^փN?o|jGMZ"4+fWNa-{4 (ö'ü Q]Z H& ld烁C18#6X1c`< ?-G]UH[^ydy(J(u* 19M*@D"p(%zL|j$&5p{╾Gp{xQ@CjdV<5C=`i$s ZHr fcK ́'<ku! 
Q,5+ri` y0s=Ds!/5DI!M*U@)*:,EO-U#%]N煂 "^jPucp{7i"CkK!%$:7,s&$j BspF@5#ϵbzi}0lC>n+'K2s|r TZWڃ#[` jaALDmkmABrvznC㴓_˦iZ-_1mSg,p͎g_s?ݝowpWK+ZՄ [m49jPGZbFL?^|>gkai[КרNaGIm%wk=4k_}dZ70w=lq&2AkQ_!'0 o90uyÇKuK$P57`::t^VjEEJCu #YRL9+[AV^ @69@O**"AWd2%Mq#SOD6_vKR}yGB^IEP`'~yM&hd) ![kT@"v%Bf q~PXfR$mrr9m瑣 KsS"?amQzUSR c0QOl6j,N#=Qa@mCF:eHOuehE,CJXn_3kjCg]Ad]C R#IOQ|>JQ #*/C`Aɥ̓uG 䋧 !bGg#"*'9kXJc mAl寖#,n!נsc|(ϋ5$0!g Y(Bu^=LO"? *\t6$8Sb&ڕKM!MsZ,rS1S9)LӤ炣%%mq+ěf2=V42dS2(IÒ3oٵ`+ZhTT8 ӫ fkys1$୎ jeyM@ PDl cam]ZʞhDuZB~*ǥCwMzfF̌G޻iұ3Vd * 淞 WOO{?=Lp?8b$͖ɗB,٬=YՊ+yAZX8bFAϋ_{'M ߮0Wxx=0[Amn(q[Agtzp:JsuP/*uv|\vaN"dZs!ҧun7][mQa7Լd\@ҡ4eJ8j(DD䬣@ݦ /sv& SYi*IG *=(cSb rD)J@چlĬa(]j?8!=H%Jz|U$H*:=_AR 4E($Rh2t Df`QVP*'OBXd7jfј`2,]m5pvAU*3Oص JD\BKynYIc q!k洭4"E6I5ϟ=7 ;֤F;4%a屏(f8ߌBIBBEzdײF[;o=NSZt  - ~vF#h2|t] 25~*TIӁ\*6_oyza]{寖*?ψW7@u)>xO?}}~oEsb寖Zjiu!cxG$tFz26ymѳ0k=8f,_XLc[ j F/>{۳n >\ÓEvߑKAA#֡$FeޞY0W&Ob?_ eR4k.DHZqό'!L&(VWtX2m~CR(9b̭f$2 \`,K%A.E(QG~B[68Y:;{PD4E+kS6`}6 H+c1*0RPᮢeOܘRM]THtV*v㯂m0֢<:j5*w(fDa6\"zMҐ!Gwɕ diDζ%1AH17 q{{qC* mVD\`H 1ʹSFB[DՔK 7|=hYͱeaBI/+OU>ΪfIm$/-݉N[p0Z g⫙G~_@3 =p|µ];}mُ׿/oO~Gn9E:6o;56jWt"*~W|_wky!iZSZfdQWk=l+ݾqrL ] tD8N flReRSLgяpod#?\[dJ*:dNQ+-!=4<BXHd7=wyE楐TPL>}uX%K)J+-gfl9*7$U`ߞlBBL0SbQ]2z}Pni@IDATmrPe[ϟe&qçvV4nD.dARItYjh[/fI+k' уfw`?cr* GڒLVX SHV'@,9GP3m]ɧl\#Q&'|m`H'e9;f4Yc[<)i]6& d2jOP1˜ԘMR:0 DlF%Xp{BR7>; $+pZFSNLTCۇ a7 R⒆GeriW_'_5j0h4DZ.c+,sWvF|h0Kwl{~';xO+gO_x)_5j_y, N=cT\\~^>?8%&~WV278XAf:Ȋz0ѵZ^c5<7a*~+8] {$}ZV@ƞb({.M!kb蠑21aLJEj@ZึȺgSb.`1}hr'E; R 0YC-fNVqdIwPRlT-}`Շ|\+}t coM"K=,VI$ F2dġ=[g˥%Ƞf-}!dO72R)ۺ=gt__1'T S/FNxWmz/]yu+WSIn+&_-~+;^3zdy :Z iփ s0zХ+X__Nj۷\F-S~,ݔ9i;W5|x+ 8@6P$$LJ̛xC]QxXǟ(j3.%(RpͥF;)2@}{\'٢tkp"Ê2d},H+FCp@d`h!FC ^Y a<(!TcfЄԵӉ76H( *;F:/0IBph&l \x)]]dA֞!Po igZ~Pm0KIIԡu(/)Eiِܞqpg,m2lZ`QH\' f%rn.Z#{IY9-/XZ+_u=@ doӘ-N!Kt(QMyW$vϝE(# BvAHa v{ۯ5S[Zـ]1P&IpdGb~G)+?L⧎ǝ 26fAT%XEZ׺!Es8|Xq/ieמf51 h)S4"O)ۅ}!T(L= "maQcTyAiTÆw B[QJSA @b7&E7+)`kDˤp#I'gj59=S|fkB4zRO^Z@-L[DQWRs-2桌(-˶R=?ZLJѪvxl)} ks˲f|)6C4rQ.Fu_!d_eSզuk 寖rooJoa6zi|e՚%XwG?!̃{wH}ޯ{w ,7};M: {V|pT n_\ޞy]w+טORҀcJ+zǵl q3y΍#ة?-]uO_{1Vod[! [B0::bxn00) )F$= 2 }᪽w-[rU?\_fab3Q)xK0)(?%8 !ˮB~ KHNp 7G|{2r׾n} Ow|jϟO?܃7HJkfiW_MXq.ӊ1II[i ͻ,{~ z8雟zϛt}-mKXmd@qoG>(C&;rOiHx4v[A;Zyo/_n7Gkk\` ~ok a{q>*DUx(Iv7M2r3F9 P;iO:) %uhF}pZI(bz.*Pߤ NS !Wlƚf"il Ce F[:Jj 5/`) ęj>Jӛ"BV+Ьm&aB+q6Ĥ$Aa(JRS#b{ R#١B UqEߑי ׶(D{=vk7B骒1P2F% %mBC Z΃*N6@Zy@t;eHnSdpLHH[ , p(؏ C9381 t Y  -^`jy_' @Qf/\R,YCZ_ʱ}w5yu>y $VS*7Hh٦1"Vhusث4)ֳkj{tm?Xj寖Uzt&Xe3"Q+WUٳ\nytG.{tϟ pr~هnĝgie+ZՊ mކʐ'nǟ{{?ņ7so&YDK ;z_njLκjփ.hk=Ȉq)Ju_}v43Pw݅FJִy%YսA"0_p׆(Cazz(%(%Ct/F*" XkPk,T`d=9 By5>8@R߲c={o>AdE EL vҰT>ieA3PyW om\ibQ7į|"\lHh NU['$KJ8 XM$.R4 a# 1ϒ (kd}%ZF`dp );/o*!U$dqqBу+jO\/ ) A"mFVZ eBX#$NBXvO `ZkCJ*7>-mH@Qf8Jj.EYΕ8Z:[36Ɠzp.v93ȉ Ynil'PP Y,Б$ZNrRwTh6Ngz寖%bH0!0}QqN +WNNwcAލaA|}ytqy586'Ր[UWh_:{ ^:  />>?t?e W?EuXO n  41bG:k=Uzp_u}RY3*_RLA1B fypŹz:^!@LP0ʭ4!"iA؈E(w ARzO F/Ô#0#R&+32'vv2Hqoo`AͿR)֌%rhf誄N")! I։ CW70D !LZ}(-00Ih4?#)40),6裤iIלP;~#QUEܐ%YNg '*d) l8!@AQH.goA(<+ONRRS棿p?e7̳.!&% z3wv]~=Ctk>}V*bD73kTe+;D ]T, wRWMhϧf+#aC15?~b_-+$7m`UϋNvlѻ?o_~?6V0 Dkh+rփn8:zϯuo vUp~)0iփ̡#7sա+Z`Ʉ¬vnGnUzW[5ÖHek̺\!tiVίhb$pW^@vƎϢ&WD1%Ō;|T فܧcm̻̦.rr6T O,3JTV;))| 7.y9MJ68l6BER\ǨLT!0n" t((@l%@aK _6urI%'U ;(HɶMHm8T] BLI}C2{'9<""}XJS!d*絸LaR]Ɍ\eܬrici`gS(psm&v}%02+4g*>bDY aUkYy( t7dkPI-'Z䄬TMz-;KQWL$ !Zl*ۍduW.tt %eWZmI8J-?SJ+/U.Uw8vMwZj+%|NQ[ŃLg8v*p5:/L̬Ɗ}ö{s1=sW^˻;=xUhr"7B>B@&kc.\Ŋ#ڿ_e7zЅ1O/G>#9e_7wk=B%ZzP"v눯,FP"Vìqi-~)`WjqAٜI8!2m(klV\o sC7h3.')[PØGAIz5{`">s/OM4uJ`p'N9+F)8R R6[-vDoH&QD7bAd^M3*(QRZ٨frRJf#Y"-r%?-`]^g MbAF9-޾jckF[Ҋ= :"piLr\2oBv@XJ% C 5qM+ Ĵcx"Te',Q]e/ 2״M['S_nKk `_Mk%l]=R 4[޵ոF9,Պ޸ʙ1. 
python-opentelemetry-1.39.1/.github/scripts/

python-opentelemetry-1.39.1/.github/scripts/update-version-patch.sh

#!/bin/bash -e
# Bump the stable and prerelease versions recorded in eachdist.ini, then
# rewrite the previous patch versions across the individual packages.
# Usage: update-version-patch.sh <stable> <prerelease> <prev-stable> <prev-prerelease>

sed -i "/\[stable\]/{n;s/version=.*/version=$1/}" eachdist.ini
sed -i "/\[prerelease\]/{n;s/version=.*/version=$2/}" eachdist.ini

./scripts/eachdist.py update_patch_versions \
  --stable_version=$1 \
  --unstable_version=$2 \
  --stable_version_prev=$3 \
  --unstable_version_prev=$4

python-opentelemetry-1.39.1/.github/scripts/update-version.sh

#!/bin/bash -e
# Set the stable and prerelease versions in eachdist.ini and propagate
# them to every package.
# Usage: update-version.sh <stable> <prerelease>

sed -i "/\[stable\]/{n;s/version=.*/version=$1/}" eachdist.ini
sed -i "/\[prerelease\]/{n;s/version=.*/version=$2/}" eachdist.ini

./scripts/eachdist.py update_versions --versions stable,prerelease

python-opentelemetry-1.39.1/.github/scripts/use-cla-approved-github-bot.sh

#!/bin/bash -e
# Configure Git so commits are authored by the CLA-approved otelbot account.

git config user.name otelbot
git config user.email 197425009+otelbot@users.noreply.github.com

python-opentelemetry-1.39.1/.github/workflows/

python-opentelemetry-1.39.1/.github/workflows/backport.yml

name: Backport
on:
  workflow_dispatch:
    inputs:
      number:
        description: "The pull request # to backport"
        required: true

permissions:
  contents: read

jobs:
  backport:
    runs-on: ubuntu-latest
    permissions:
      contents: write # required for pushing changes
    steps:
      - run: |
$GITHUB_REF_NAME =~ ^release/v[0-9]+\.[0-9]+\.x-0\.[0-9]+bx$ ]]; then echo this workflow should only be run against long-term release branches exit 1 fi - uses: actions/checkout@v4 with: # history is needed to run git cherry-pick below fetch-depth: 0 - name: Use CLA approved github bot run: .github/scripts/use-cla-approved-github-bot.sh - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 id: otelbot-token with: app-id: ${{ vars.OTELBOT_APP_ID }} private-key: ${{ secrets.OTELBOT_PRIVATE_KEY }} - name: Create pull request env: NUMBER: ${{ github.event.inputs.number }} # not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows GITHUB_TOKEN: ${{ steps.otelbot-token.outputs.token }} run: | commit=$(gh pr view $NUMBER --json mergeCommit --jq .mergeCommit.oid) title=$(gh pr view $NUMBER --json title --jq .title) branch="otelbot/backport-${NUMBER}-to-${GITHUB_REF_NAME//\//-}" git cherry-pick $commit git push origin HEAD:$branch gh pr create --title "[$GITHUB_REF_NAME] $title" \ --body "Clean cherry-pick of #$NUMBER to the \`$GITHUB_REF_NAME\` branch." \ --head $branch \ --base $GITHUB_REF_NAME python-opentelemetry-1.39.1/.github/workflows/benchmarks.yml000066400000000000000000000027341511654350100242640ustar00rootroot00000000000000name: SDK Benchmark Tests on: push: branches: [ main ] permissions: contents: read jobs: sdk-benchmarks: permissions: contents: write # required for pushing to gh-pages runs-on: oracle-bare-metal-64cpu-512gb-x86-64 container: image: python:3.13-slim steps: - name: Install Git # since Git isn't available in the container image used above run: | apt-get update apt-get install -y git - name: Make repo safe for Git inside container run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - name: Checkout Core Repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Install tox run: pip install tox-uv - name: Run tox run: tox -e benchmark-opentelemetry-sdk -- -k opentelemetry-sdk/benchmarks --benchmark-json=opentelemetry-sdk/output.json - name: Report on SDK benchmark results uses: benchmark-action/github-action-benchmark@v1 with: name: OpenTelemetry Python SDK Benchmarks tool: pytest output-file-path: opentelemetry-sdk/output.json gh-pages-branch: gh-pages github-token: ${{ secrets.GITHUB_TOKEN }} # Make a commit on `gh-pages` with benchmarks from previous step benchmark-data-dir-path: "benchmarks" auto-push: true max-items-in-chart: 100 # Alert with a commit comment on possible performance regression alert-threshold: '200%' comment-on-alert: true python-opentelemetry-1.39.1/.github/workflows/changelog.yml000066400000000000000000000023231511654350100240700ustar00rootroot00000000000000# This action requires that any PR targeting the main branch should touch at # least one CHANGELOG file. If a CHANGELOG entry is not required, add the "Skip # Changelog" label to disable this action. name: changelog on: pull_request: types: [opened, synchronize, reopened, labeled, unlabeled] branches: - main permissions: contents: read jobs: changelog: runs-on: ubuntu-latest if: | !contains(github.event.pull_request.labels.*.name, 'Skip Changelog') && github.actor != 'otelbot[bot]' steps: - uses: actions/checkout@v4 - name: Check for CHANGELOG changes run: | # Only the latest commit of the feature branch is available # automatically. To diff with the base branch, we need to # fetch that too (and we only need its latest commit). 
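# (Illustrative note, values assumed for the example: for a PR that targets
# main, the two commands below amount to "git fetch origin main --depth=1"
# followed by a name-only diff against FETCH_HEAD, grepping for any changed
# path containing CHANGELOG.)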
git fetch origin ${{ github.base_ref }} --depth=1 if [[ $(git diff --name-only FETCH_HEAD | grep CHANGELOG) ]] then echo "A CHANGELOG was modified. Looks good!" else echo "No CHANGELOG was modified." echo "Please add a CHANGELOG entry, or add the \"Skip Changelog\" label if not required." false fi python-opentelemetry-1.39.1/.github/workflows/check-links.yml000066400000000000000000000025741511654350100243440ustar00rootroot00000000000000name: check-links on: push: branches: [ main ] pull_request: permissions: contents: read jobs: changedfiles: name: changed files runs-on: ubuntu-latest if: ${{ github.actor != 'dependabot[bot]' }} outputs: md: ${{ steps.changes.outputs.md }} steps: - name: Checkout Repo uses: actions/checkout@v4 with: fetch-depth: 0 - name: Get changed files id: changes run: | echo "md=$(git diff --name-only --diff-filter=ACMRTUXB $(git merge-base origin/main ${{ github.event.pull_request.head.sha }}) ${{ github.event.pull_request.head.sha }} | grep .md$ | xargs)" >> $GITHUB_OUTPUT check-links: runs-on: ubuntu-latest needs: changedfiles if: | github.event.pull_request.user.login != 'otelbot[bot]' && github.event_name == 'pull_request' && ${{needs.changedfiles.outputs.md}} steps: - name: Checkout Repo uses: actions/checkout@v4 with: fetch-depth: 0 - name: Install markdown-link-check run: npm install -g markdown-link-check@v3.12.2 - name: Run markdown-link-check run: | markdown-link-check \ --verbose \ --config .github/workflows/check_links_config.json \ ${{needs.changedfiles.outputs.md}} \ || { echo "Check that anchor links are lowercase"; exit 1; } python-opentelemetry-1.39.1/.github/workflows/check_links_config.json000066400000000000000000000004401511654350100261110ustar00rootroot00000000000000{ "ignorePatterns": [ { "pattern": "http(s)?://\\d+\\.\\d+\\.\\d+\\.\\d+" }, { "pattern": "http(s)?://localhost" }, { "pattern": "http(s)?://example.com" } ], "aliveStatusCodes": [429, 200] } python-opentelemetry-1.39.1/.github/workflows/codeql-analysis.yml000066400000000000000000000023351511654350100252340ustar00rootroot00000000000000name: CodeQL Analysis on: workflow_dispatch: schedule: # ┌───────────── minute (0 - 59) # │ ┌───────────── hour (0 - 23) # │ │ ┌───────────── day of the month (1 - 31) # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) # │ │ │ │ │ # │ │ │ │ │ # │ │ │ │ │ # * * * * * - cron: '30 1 * * *' permissions: contents: read jobs: CodeQL-Build: permissions: security-events: write # for github/codeql-action/analyze to upload SARIF results runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. 
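# (For reference: the languages input below limits the analysis to Python;
# CodeQL accepts a comma-separated list when more languages should be scanned.)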
- name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: python - name: Autobuild uses: github/codeql-action/autobuild@v3 - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 python-opentelemetry-1.39.1/.github/workflows/contrib.yml000066400000000000000000000013761511654350100236100ustar00rootroot00000000000000name: Core Contrib Test on: push: branches-ignore: - 'release/*' - 'otelbot/*' pull_request: permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: contrib_0: uses: open-telemetry/opentelemetry-python-contrib/.github/workflows/core_contrib_test_0.yml@main with: CORE_REPO_SHA: ${{ github.sha }} CONTRIB_REPO_SHA: ${{ github.event_name == 'pull_request' && ( contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref || contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref || 'main' ) || 'main' }} python-opentelemetry-1.39.1/.github/workflows/fossa.yml000066400000000000000000000006221511654350100232540ustar00rootroot00000000000000name: FOSSA scanning on: push: branches: - main permissions: contents: read jobs: fossa: runs-on: ubuntu-latest steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0 with: api-key: ${{secrets.FOSSA_API_KEY}} team: OpenTelemetry python-opentelemetry-1.39.1/.github/workflows/generate_workflows.py000066400000000000000000000130671511654350100257060ustar00rootroot00000000000000from collections import defaultdict from pathlib import Path from re import compile as re_compile from jinja2 import Environment, FileSystemLoader from tox.config.cli.parse import get_options from tox.config.sets import CoreConfigSet from tox.config.source.tox_ini import ToxIni from tox.session.state import State _tox_test_env_regex = re_compile( r"(?P<python_version>py\w+)-test-" r"(?P<name>[-\w]+\w)-?(?P<test_requirements>\d+)?" ) _tox_lint_env_regex = re_compile(r"lint-(?P<name>[-\w]+)") _tox_contrib_env_regex = re_compile( r"py39-test-(?P<name>[-\w]+\w)-?(?P<contrib_requirements>\d+)?"
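# (Illustrative: a tox env such as "py39-test-opentelemetry-sdk" is matched
# by _tox_test_env_regex above with python_version="py39" and
# name="opentelemetry-sdk"; the named groups are read back via
# tox_test_env_match.groupdict() in get_test_job_datas below.)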
) def get_tox_envs(tox_ini_path: Path) -> list: tox_ini = ToxIni(tox_ini_path) conf = State(get_options(), []).conf tox_section = next(tox_ini.sections()) core_config_set = CoreConfigSet( conf, tox_section, tox_ini_path.parent, tox_ini_path ) ( core_config_set.loaders.extend( tox_ini.get_loaders( tox_section, base=[], override_map=defaultdict(list, {}), conf=core_config_set, ) ) ) return core_config_set.load("env_list") def get_test_job_datas(tox_envs: list, operating_systems: list) -> list: os_alias = {"ubuntu-latest": "Ubuntu", "windows-latest": "Windows"} python_version_alias = { "pypy3": "pypy-3.9", "py39": "3.9", "py310": "3.10", "py311": "3.11", "py312": "3.12", "py313": "3.13", } test_job_datas = [] for operating_system in operating_systems: for tox_env in tox_envs: tox_test_env_match = _tox_test_env_regex.match(tox_env) if tox_test_env_match is None: continue groups = tox_test_env_match.groupdict() aliased_python_version = python_version_alias[ groups["python_version"] ] tox_env = tox_test_env_match.string test_requirements = groups["test_requirements"] if test_requirements is None: test_requirements = " " else: test_requirements = f"-{test_requirements} " test_job_datas.append( { "name": f"{tox_env}_{operating_system}", "ui_name": ( f"{groups['name']}" f"{test_requirements}" f"{aliased_python_version} " f"{os_alias[operating_system]}" ), "python_version": aliased_python_version, "tox_env": tox_env, "os": operating_system, } ) return test_job_datas def get_lint_job_datas(tox_envs: list) -> list: lint_job_datas = [] for tox_env in tox_envs: tox_lint_env_match = _tox_lint_env_regex.match(tox_env) if tox_lint_env_match is None: continue tox_env = tox_lint_env_match.string lint_job_datas.append( { "name": f"{tox_env}", "ui_name": f"{tox_lint_env_match.groupdict()['name']}", "tox_env": tox_env, } ) return lint_job_datas def get_misc_job_datas(tox_envs: list) -> list: regex_patterns = [ _tox_test_env_regex, _tox_lint_env_regex, _tox_contrib_env_regex, re_compile(r"benchmark.+"), ] return [ tox_env for tox_env in tox_envs if not any(pattern.match(tox_env) for pattern in regex_patterns) ] def _generate_workflow( job_datas: list, template_name: str, output_dir: Path, max_jobs: int = 250, ): # Github seems to limit the amount of jobs in a workflow file, that is why # they are split in groups of 250 per workflow file. 
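# (Illustrative: with max_jobs=250, a list of 600 jobs is sliced into
# chunks [0:250], [250:500] and [500:600] below, yielding files such as
# test_0.yml, test_1.yml and test_2.yml.)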
for file_number, job_datas in enumerate( [ job_datas[index : index + max_jobs] for index in range(0, len(job_datas), max_jobs) ] ): with open( output_dir.joinpath(f"{template_name}_{file_number}.yml"), "w" ) as test_yml_file: test_yml_file.write( Environment( loader=FileSystemLoader( Path(__file__).parent.joinpath("templates") ) ) .get_template(f"{template_name}.yml.j2") .render(job_datas=job_datas, file_number=file_number) ) test_yml_file.write("\n") def generate_test_workflow( tox_ini_path: Path, workflow_directory_path: Path, operating_systems ) -> None: _generate_workflow( get_test_job_datas(get_tox_envs(tox_ini_path), operating_systems), "test", workflow_directory_path, ) def generate_lint_workflow( tox_ini_path: Path, workflow_directory_path: Path, ) -> None: _generate_workflow( get_lint_job_datas(get_tox_envs(tox_ini_path)), "lint", workflow_directory_path, ) def generate_misc_workflow( tox_ini_path: Path, workflow_directory_path: Path, ) -> None: _generate_workflow( get_misc_job_datas(get_tox_envs(tox_ini_path)), "misc", workflow_directory_path, ) if __name__ == "__main__": tox_ini_path = Path(__file__).parent.parent.parent.joinpath("tox.ini") output_dir = Path(__file__).parent generate_test_workflow( tox_ini_path, output_dir, ["ubuntu-latest", "windows-latest"] ) generate_lint_workflow(tox_ini_path, output_dir) generate_misc_workflow(tox_ini_path, output_dir) python-opentelemetry-1.39.1/.github/workflows/lint_0.yml000066400000000000000000000243641511654350100233370ustar00rootroot00000000000000# Do not edit this file. # This file is generated automatically by executing tox -e generate-workflows name: Lint 0 on: push: branches-ignore: - 'release/*' - 'otelbot/*' pull_request: permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true env: CORE_REPO_SHA: main # Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main' # For PRs you can change the inner fallback ('main') # For pushes you change the outer fallback ('main') # The logic below is used during releases and depends on having an equivalent branch name in the contrib repo. 
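# (Illustrative reading of the expression below: a PR labeled
# 'prepare-release' resolves to the PR's head branch, a PR labeled
# 'backport' resolves to the PR's base branch, and everything else
# falls back to 'main'.)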
CONTRIB_REPO_SHA: ${{ github.event_name == 'pull_request' && ( contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref || contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref || 'main' ) || 'main' }} PIP_EXISTS_ACTION: w jobs: lint-opentelemetry-api: name: opentelemetry-api runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-api lint-opentelemetry-proto-gen-latest: name: opentelemetry-proto-gen-latest runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-proto-gen-latest lint-opentelemetry-sdk: name: opentelemetry-sdk runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-sdk lint-opentelemetry-semantic-conventions: name: opentelemetry-semantic-conventions runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-semantic-conventions lint-opentelemetry-getting-started: name: opentelemetry-getting-started runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-getting-started lint-opentelemetry-opentracing-shim: name: opentelemetry-opentracing-shim runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-opentracing-shim lint-opentelemetry-opencensus-shim: name: opentelemetry-opencensus-shim runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-opencensus-shim lint-opentelemetry-exporter-opencensus: name: opentelemetry-exporter-opencensus runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-exporter-opencensus lint-opentelemetry-exporter-otlp-proto-common: name: opentelemetry-exporter-otlp-proto-common runs-on: ubuntu-latest timeout-minutes: 30 
steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-exporter-otlp-proto-common lint-opentelemetry-exporter-otlp-combined: name: opentelemetry-exporter-otlp-combined runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-exporter-otlp-combined lint-opentelemetry-exporter-otlp-proto-grpc-latest: name: opentelemetry-exporter-otlp-proto-grpc-latest runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-exporter-otlp-proto-grpc-latest lint-opentelemetry-exporter-otlp-proto-http: name: opentelemetry-exporter-otlp-proto-http runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-exporter-otlp-proto-http lint-opentelemetry-exporter-prometheus: name: opentelemetry-exporter-prometheus runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-exporter-prometheus lint-opentelemetry-exporter-zipkin-combined: name: opentelemetry-exporter-zipkin-combined runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-exporter-zipkin-combined lint-opentelemetry-exporter-zipkin-proto-http: name: opentelemetry-exporter-zipkin-proto-http runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-exporter-zipkin-proto-http lint-opentelemetry-exporter-zipkin-json: name: opentelemetry-exporter-zipkin-json runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-exporter-zipkin-json lint-opentelemetry-propagator-b3: name: opentelemetry-propagator-b3 runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: 
tox -e lint-opentelemetry-propagator-b3 lint-opentelemetry-propagator-jaeger: name: opentelemetry-propagator-jaeger runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-propagator-jaeger lint-opentelemetry-test-utils: name: opentelemetry-test-utils runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e lint-opentelemetry-test-utils python-opentelemetry-1.39.1/.github/workflows/misc_0.yml000066400000000000000000000143331511654350100233170ustar00rootroot00000000000000# Do not edit this file. # This file is generated automatically by executing tox -e generate-workflows name: Misc 0 on: push: branches-ignore: - 'release/*' - 'otelbot/*' pull_request: permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true env: CORE_REPO_SHA: main # Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main' # For PRs you can change the inner fallback ('main') # For pushes you change the outer fallback ('main') # The logic below is used during releases and depends on having an equivalent branch name in the contrib repo. CONTRIB_REPO_SHA: ${{ github.event_name == 'pull_request' && ( contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref || contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref || 'main' ) || 'main' }} PIP_EXISTS_ACTION: w jobs: spellcheck: name: spellcheck runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e spellcheck tracecontext: name: tracecontext runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e tracecontext typecheck: name: typecheck runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e typecheck docs: name: docs runs-on: ubuntu-latest timeout-minutes: 30 if: | github.event.pull_request.user.login != 'otelbot[bot]' && github.event_name == 'pull_request' steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e docs docker-tests-otlpexporter: name: docker-tests-otlpexporter runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: 
actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e docker-tests-otlpexporter docker-tests-opencensus: name: docker-tests-opencensus runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e docker-tests-opencensus public-symbols-check: name: public-symbols-check runs-on: ubuntu-latest timeout-minutes: 30 if: | !contains(github.event.pull_request.labels.*.name, 'Approve Public API check') && github.actor != 'otelbot[bot]' && github.event_name == 'pull_request' steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 with: fetch-depth: 0 - name: Checkout main run: git checkout main - name: Pull origin run: git pull --rebase=false origin main - name: Checkout pull request run: git checkout ${{ github.event.pull_request.head.sha }} - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e public-symbols-check shellcheck: name: shellcheck runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e shellcheck generate-workflows: name: generate-workflows runs-on: ubuntu-latest timeout-minutes: 30 if: | !contains(github.event.pull_request.labels.*.name, 'Skip generate-workflows') && github.event.pull_request.user.login != 'otelbot[bot]' && github.event_name == 'pull_request' steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e generate-workflows - name: Check github workflows are up to date run: git diff --exit-code || (echo 'Generated workflows are out of date, run "tox -e generate-workflows" and commit the changes in this PR.' && exit 1) precommit: name: precommit runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e precommit python-opentelemetry-1.39.1/.github/workflows/ossf-scorecard.yml000066400000000000000000000030201511654350100250510ustar00rootroot00000000000000name: OSSF Scorecard on: push: branches: - main schedule: - cron: "16 11 * * 4" # once a week workflow_dispatch: permissions: read-all jobs: analysis: runs-on: ubuntu-latest permissions: # Needed for Code scanning upload security-events: write # Needed for GitHub OIDC token if publish_results is true id-token: write steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: persist-credentials: false - uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 with: results_file: results.sarif results_format: sarif publish_results: true # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. 
# https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts - name: "Upload artifact" uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: SARIF file path: results.sarif retention-days: 5 # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" uses: github/codeql-action/upload-sarif@5f8171a638ada777af81d42b55959a643bb29017 # v3.28.12 with: sarif_file: results.sarif python-opentelemetry-1.39.1/.github/workflows/prepare-patch-release.yml000066400000000000000000000074001511654350100263130ustar00rootroot00000000000000name: Prepare patch release on: workflow_dispatch: permissions: contents: read jobs: prepare-patch-release: permissions: contents: write # required for pushing changes pull-requests: write # required for adding labels to PRs runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install toml run: pip install toml - run: | if [[ ! $GITHUB_REF_NAME =~ ^release/v[0-9]+\.[0-9]+\.x-0\.[0-9]+bx$ ]]; then echo this workflow should only be run against long-term release branches exit 1 fi if ! grep --quiet "^## Unreleased$" CHANGELOG.md; then echo the change log is missing an \"Unreleased\" section exit 1 fi - name: Set environment variables run: | stable_version=$(./scripts/eachdist.py version --mode stable) unstable_version=$(./scripts/eachdist.py version --mode prerelease) if [[ $stable_version =~ ^([0-9]+\.[0-9]+)\.([0-9]+)$ ]]; then stable_major_minor="${BASH_REMATCH[1]}" stable_patch="${BASH_REMATCH[2]}" else echo "unexpected stable_version: $stable_version" exit 1 fi if [[ $unstable_version =~ ^0\.([0-9]+)b([0-9]+)$ ]]; then unstable_minor="${BASH_REMATCH[1]}" unstable_patch="${BASH_REMATCH[2]}" else echo "unexpected unstable_version: $unstable_version" exit 1 fi stable_version_prev="$stable_major_minor.$((stable_patch))" unstable_version_prev="0.${unstable_minor}b$((unstable_patch))" stable_version="$stable_major_minor.$((stable_patch + 1))" unstable_version="0.${unstable_minor}b$((unstable_patch + 1))" echo "STABLE_VERSION=$stable_version" >> $GITHUB_ENV echo "UNSTABLE_VERSION=$unstable_version" >> $GITHUB_ENV echo "STABLE_VERSION_PREV=$stable_version_prev" >> $GITHUB_ENV echo "UNSTABLE_VERSION_PREV=$unstable_version_prev" >> $GITHUB_ENV - name: Update version run: .github/scripts/update-version-patch.sh $STABLE_VERSION $UNSTABLE_VERSION $STABLE_VERSION_PREV $UNSTABLE_VERSION_PREV - name: Update the change log with the approximate release date run: | date=$(date "+%Y-%m-%d") sed -Ei "s/^## Unreleased$/## Version ${STABLE_VERSION}\/${UNSTABLE_VERSION} ($date)/" CHANGELOG.md - name: Use CLA approved github bot run: .github/scripts/use-cla-approved-github-bot.sh - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 id: otelbot-token with: app-id: ${{ vars.OTELBOT_APP_ID }} private-key: ${{ secrets.OTELBOT_PRIVATE_KEY }} - name: Create pull request id: create_pr env: # not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows GITHUB_TOKEN: ${{ steps.otelbot-token.outputs.token }} run: | message="Prepare release ${STABLE_VERSION}/${UNSTABLE_VERSION}" branch="otelbot/prepare-release-${STABLE_VERSION}-${UNSTABLE_VERSION}" git commit -a -m "$message" git push origin HEAD:$branch pr_url=$(gh pr create --title "[$GITHUB_REF_NAME] $message" \ --body "$message." 
\ --head $branch \ --base $GITHUB_REF_NAME) echo "pr_url=$pr_url" >> $GITHUB_OUTPUT - name: Add prepare-release label to PR if: steps.create_pr.outputs.pr_url != '' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | gh pr edit ${{ steps.create_pr.outputs.pr_url }} --add-label "prepare-release" python-opentelemetry-1.39.1/.github/workflows/prepare-release-branch.yml000066400000000000000000000210101511654350100264420ustar00rootroot00000000000000name: Prepare release branch on: workflow_dispatch: inputs: prerelease_version: description: "Pre-release version number? (e.g. 1.9.0rc2)" required: false permissions: contents: read jobs: prereqs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Install toml run: pip install toml - name: Verify prerequisites env: PRERELEASE_VERSION: ${{ github.event.inputs.prerelease_version }} run: | if [[ $GITHUB_REF_NAME != main ]]; then echo this workflow should only be run against main exit 1 fi if ! grep --quiet "^## Unreleased$" CHANGELOG.md; then echo the change log is missing an \"Unreleased\" section exit 1 fi if [[ ! -z $PRERELEASE_VERSION ]]; then stable_version=$(./scripts/eachdist.py version --mode stable) stable_version=${stable_version//.dev/} if [[ $PRERELEASE_VERSION != ${stable_version}* ]]; then echo "$PRERELEASE_VERSION is not a prerelease for the version on main ($stable_version)" exit 1 fi fi create-pull-request-against-release-branch: permissions: contents: write # required for pushing changes pull-requests: write # required for adding labels to PRs runs-on: ubuntu-latest needs: prereqs steps: - uses: actions/checkout@v4 - name: Install toml run: pip install toml - name: Create release branch env: PRERELEASE_VERSION: ${{ github.event.inputs.prerelease_version }} run: | if [[ -z $PRERELEASE_VERSION ]]; then stable_version=$(./scripts/eachdist.py version --mode stable) stable_version=${stable_version//.dev/} else stable_version=$PRERELEASE_VERSION fi unstable_version=$(./scripts/eachdist.py version --mode prerelease) unstable_version=${unstable_version//.dev/} if [[ $stable_version =~ ^([0-9]+)\.([0-9]+)\.0$ ]]; then stable_version_branch_part=$(echo $stable_version | sed -E 's/([0-9]+)\.([0-9]+)\.0/\1.\2.x/') unstable_version_branch_part=$(echo $unstable_version | sed -E 's/0\.([0-9]+)b0/0.\1bx/') release_branch_name="release/v${stable_version_branch_part}-${unstable_version_branch_part}" elif [[ $stable_version =~ ^([0-9]+)\.([0-9]+)\.0 ]]; then # pre-release version, e.g. 
1.9.0rc2 release_branch_name="release/v$stable_version-$unstable_version" else echo "unexpected version: $stable_version" exit 1 fi git push origin HEAD:$release_branch_name echo "STABLE_VERSION=$stable_version" >> $GITHUB_ENV echo "UNSTABLE_VERSION=$unstable_version" >> $GITHUB_ENV echo "RELEASE_BRANCH_NAME=$release_branch_name" >> $GITHUB_ENV - name: Update version run: .github/scripts/update-version.sh $STABLE_VERSION $UNSTABLE_VERSION - name: Update the change log with the approximate release date run: | date=$(date "+%Y-%m-%d") sed -Ei "s/^## Unreleased$/## Version ${STABLE_VERSION}\/${UNSTABLE_VERSION} ($date)/" CHANGELOG.md - name: Use CLA approved github bot run: .github/scripts/use-cla-approved-github-bot.sh - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 id: otelbot-token with: app-id: ${{ vars.OTELBOT_APP_ID }} private-key: ${{ secrets.OTELBOT_PRIVATE_KEY }} - name: Create pull request against the release branch id: create_release_branch_pr env: # not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows GITHUB_TOKEN: ${{ steps.otelbot-token.outputs.token }} run: | message="Prepare release ${STABLE_VERSION}/${UNSTABLE_VERSION}" branch="otelbot/prepare-release-${STABLE_VERSION}-${UNSTABLE_VERSION}" git commit -a -m "$message" git push origin HEAD:$branch pr_url=$(gh pr create --title "[$RELEASE_BRANCH_NAME] $message" \ --body "$message." \ --head $branch \ --base $RELEASE_BRANCH_NAME) echo "pr_url=$pr_url" >> $GITHUB_OUTPUT - name: Add prepare-release label to PR if: steps.create_release_branch_pr.outputs.pr_url != '' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | gh pr edit ${{ steps.create_release_branch_pr.outputs.pr_url }} --add-label "prepare-release" create-pull-request-against-main: permissions: contents: write # required for pushing changes pull-requests: write # required for adding labels to PRs runs-on: ubuntu-latest needs: prereqs steps: - uses: actions/checkout@v4 - name: Install toml run: pip install toml - name: Set environment variables env: PRERELEASE_VERSION: ${{ github.event.inputs.prerelease_version }} run: | if [[ -z $PRERELEASE_VERSION ]]; then stable_version=$(./scripts/eachdist.py version --mode stable) stable_version=${stable_version//.dev/} else stable_version=$PRERELEASE_VERSION fi unstable_version=$(./scripts/eachdist.py version --mode prerelease) unstable_version=${unstable_version//.dev/} if [[ $stable_version =~ ^([0-9]+)\.([0-9]+)\.0$ ]]; then stable_major="${BASH_REMATCH[1]}" stable_minor="${BASH_REMATCH[2]}" stable_next_version="$stable_major.$((stable_minor + 1)).0" elif [[ $stable_version =~ ^([0-9]+)\.([0-9]+)\.0 ]]; then # pre-release version, e.g. 
1.9.0rc2 stable_major="${BASH_REMATCH[1]}" stable_minor="${BASH_REMATCH[2]}" stable_next_version="$stable_major.$stable_minor.0" else echo "unexpected stable_version: $stable_version" exit 1 fi if [[ $unstable_version =~ ^0\.([0-9]+)b[0-9]+$ ]]; then unstable_minor="${BASH_REMATCH[1]}" else echo "unexpected unstable_version: $unstable_version" exit 1 fi unstable_next_version="0.$((unstable_minor + 1))b0" echo "STABLE_VERSION=${stable_version}" >> $GITHUB_ENV echo "STABLE_NEXT_VERSION=${stable_next_version}.dev" >> $GITHUB_ENV echo "UNSTABLE_VERSION=${unstable_version}" >> $GITHUB_ENV echo "UNSTABLE_NEXT_VERSION=${unstable_next_version}.dev" >> $GITHUB_ENV - name: Update version run: .github/scripts/update-version.sh $STABLE_NEXT_VERSION $UNSTABLE_NEXT_VERSION - name: Update the change log on main run: | # the actual release date on main will be updated at the end of the release workflow date=$(date "+%Y-%m-%d") sed -Ei "s/^## Unreleased$/## Unreleased\n\n## Version ${STABLE_VERSION}\/${UNSTABLE_VERSION} ($date)/" CHANGELOG.md - name: Use CLA approved github bot run: .github/scripts/use-cla-approved-github-bot.sh - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 id: otelbot-token with: app-id: ${{ vars.OTELBOT_APP_ID }} private-key: ${{ secrets.OTELBOT_PRIVATE_KEY }} - name: Create pull request against main id: create_main_pr env: # not using secrets.GITHUB_TOKEN since pull requests from that token do not run workflows GITHUB_TOKEN: ${{ steps.otelbot-token.outputs.token }} run: | message="Update version to ${STABLE_NEXT_VERSION}/${UNSTABLE_NEXT_VERSION}" body="Update version to \`${STABLE_NEXT_VERSION}/${UNSTABLE_NEXT_VERSION}\`." branch="otelbot/update-version-to-${STABLE_NEXT_VERSION}-${UNSTABLE_NEXT_VERSION}" git commit -a -m "$message" git push origin HEAD:$branch pr_url=$(gh pr create --title "$message" \ --body "$body" \ --head $branch \ --base main) echo "pr_url=$pr_url" >> $GITHUB_OUTPUT - name: Add prepare-release label to PR if: steps.create_main_pr.outputs.pr_url != '' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | gh pr edit ${{ steps.create_main_pr.outputs.pr_url }} --add-label "prepare-release" python-opentelemetry-1.39.1/.github/workflows/release.yml000066400000000000000000000115701511654350100235650ustar00rootroot00000000000000name: Release on: workflow_dispatch: permissions: contents: read jobs: release: permissions: contents: write # required for creating GitHub releases runs-on: ubuntu-latest steps: - run: | if [[ $GITHUB_REF_NAME != release/* ]]; then echo this workflow should only be run against release branches exit 1 fi - uses: actions/checkout@v4 - name: Install toml run: pip install toml - name: Set environment variables run: | stable_version=$(./scripts/eachdist.py version --mode stable) unstable_version=$(./scripts/eachdist.py version --mode prerelease) if [[ $stable_version =~ ^([0-9]+)\.([0-9]+)\.([0-9]+) ]]; then stable_major="${BASH_REMATCH[1]}" stable_minor="${BASH_REMATCH[2]}" stable_patch="${BASH_REMATCH[3]}" else echo "unexpected stable_version: $stable_version" exit 1 fi if [[ $stable_patch != 0 ]]; then if [[ $unstable_version =~ ^0\.([0-9]+)b([0-9]+)$ ]]; then unstable_minor="${BASH_REMATCH[1]}" unstable_patch="${BASH_REMATCH[2]}" else echo "unexpected unstable_version: $unstable_version" exit 1 fi if [[ $unstable_patch != 0 ]]; then prior_version_when_patch="$stable_major.$stable_minor.$((stable_patch - 1))/0.${unstable_minor}b$((unstable_patch - 1))" fi fi echo "STABLE_VERSION=$stable_version" >> 
$GITHUB_ENV echo "UNSTABLE_VERSION=$unstable_version" >> $GITHUB_ENV echo "PRIOR_VERSION_WHEN_PATCH=$prior_version_when_patch" >> $GITHUB_ENV - run: | if [[ -z $PRIOR_VERSION_WHEN_PATCH ]]; then # not making a patch release if ! grep --quiet "^## Version ${STABLE_VERSION}/${UNSTABLE_VERSION} " CHANGELOG.md; then echo the pull request generated by prepare-release-branch.yml needs to be merged first exit 1 fi fi # check out main branch to verify there won't be problems with merging the change log # at the end of this workflow - uses: actions/checkout@v4 with: ref: main # back to the release branch - uses: actions/checkout@v4 # next few steps publish to pypi - uses: actions/setup-python@v5 with: python-version: '3.9' - name: Build wheels run: ./scripts/build.sh - name: Install twine run: | pip install twine # The step below publishes to testpypi in order to catch any issues # with the package configuration that would cause a failure to upload # to pypi. One example of such a failure is if a classifier is # rejected by pypi (e.g. "3 - Beta"). This would cause a failure during the # middle of the package upload causing the action to fail, and certain packages # might have already been updated, this would be bad. - name: Publish to TestPyPI env: TWINE_USERNAME: '__token__' TWINE_PASSWORD: ${{ secrets.test_pypi_token }} run: | twine upload --repository testpypi --skip-existing --verbose dist/* - name: Publish to PyPI env: TWINE_USERNAME: '__token__' TWINE_PASSWORD: ${{ secrets.pypi_password }} run: | twine upload --skip-existing --verbose dist/* - name: Generate release notes env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | # conditional block not indented because of the heredoc if [[ ! -z $PRIOR_VERSION_WHEN_PATCH ]]; then cat > /tmp/release-notes.txt << EOF This is a patch release on the previous $PRIOR_VERSION_WHEN_PATCH release, fixing the issue(s) below. EOF fi # CHANGELOG_SECTION.md is also used at the end of the release workflow # for copying the change log updates to main sed -n "0,/^## Version ${STABLE_VERSION}\/${UNSTABLE_VERSION} /d;/^## Version /q;p" CHANGELOG.md \ > /tmp/CHANGELOG_SECTION.md # the complex perl regex is needed because markdown docs render newlines as soft wraps # while release notes render them as line breaks perl -0pe 's/(?<!\n)\n *(?!\n)(?![-*] )(?![1-9]+\. )/ /g' /tmp/CHANGELOG_SECTION.md >> /tmp/release-notes.txt - name: Create GitHub release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | gh release create --target $GITHUB_REF_NAME \ --title "Version ${STABLE_VERSION}/${UNSTABLE_VERSION}" \ --notes-file /tmp/release-notes.txt \ --discussion-category announcements \ v$STABLE_VERSION python-opentelemetry-1.39.1/.github/workflows/templates/000077500000000000000000000000001511654350100234145ustar00rootroot00000000000000python-opentelemetry-1.39.1/.github/workflows/templates/lint.yml.j2000066400000000000000000000031641511654350100254230ustar00rootroot00000000000000# Do not edit this file.
# This file is generated automatically by executing tox -e generate-workflows name: Lint {{ file_number }} on: push: branches-ignore: - 'release/*' - 'otelbot/*' pull_request: permissions: contents: read concurrency: group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %} cancel-in-progress: true env: CORE_REPO_SHA: main # Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main' # For PRs you can change the inner fallback ('main') # For pushes you change the outer fallback ('main') # The logic below is used during releases and depends on having an equivalent branch name in the contrib repo. CONTRIB_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && ( contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref || contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref || 'main' ) || 'main' }}{% endraw %} PIP_EXISTS_ACTION: w jobs: {%- for job_data in job_datas %} {{ job_data.name }}: name: {{ job_data.ui_name }} runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${% raw %}{{ github.sha }}{% endraw %} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e {{ job_data.tox_env }} {%- endfor %} python-opentelemetry-1.39.1/.github/workflows/templates/misc.yml.j2000066400000000000000000000056001511654350100254050ustar00rootroot00000000000000# Do not edit this file. # This file is generated automatically by executing tox -e generate-workflows name: Misc {{ file_number }} on: push: branches-ignore: - 'release/*' - 'otelbot/*' pull_request: permissions: contents: read concurrency: group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %} cancel-in-progress: true env: CORE_REPO_SHA: main # Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main' # For PRs you can change the inner fallback ('main') # For pushes you change the outer fallback ('main') # The logic below is used during releases and depends on having an equivalent branch name in the contrib repo. 
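# (Note: the raw/endraw markers below keep Jinja from interpreting the
# GitHub Actions expression syntax, which uses the same curly-brace
# delimiters as Jinja templates.)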
CONTRIB_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && ( contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref || contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref || 'main' ) || 'main' }}{% endraw %} PIP_EXISTS_ACTION: w jobs: {%- for job_data in job_datas %} {{ job_data }}: name: {{ job_data }} runs-on: ubuntu-latest timeout-minutes: 30 {%- if job_data == "generate-workflows" %} if: | !contains(github.event.pull_request.labels.*.name, 'Skip generate-workflows') && github.event.pull_request.user.login != 'otelbot[bot]' && github.event_name == 'pull_request' {%- endif %} {%- if job_data == "public-symbols-check" %} if: | !contains(github.event.pull_request.labels.*.name, 'Approve Public API check') && github.actor != 'otelbot[bot]' && github.event_name == 'pull_request' {%- endif %} {%- if job_data == "docs" %} if: | github.event.pull_request.user.login != 'otelbot[bot]' && github.event_name == 'pull_request' {%- endif %} steps: - name: Checkout repo @ SHA - ${% raw %}{{ github.sha }}{% endraw %} uses: actions/checkout@v4 {%- if job_data == "public-symbols-check" %} with: fetch-depth: 0 - name: Checkout main run: git checkout main - name: Pull origin run: git pull --rebase=false origin main - name: Checkout pull request run: git checkout ${% raw %}{{ github.event.pull_request.head.sha }}{% endraw %} {%- endif %} - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e {{ job_data }} {%- if job_data == "generate-workflows" %} - name: Check github workflows are up to date run: git diff --exit-code || (echo 'Generated workflows are out of date, run "tox -e generate-workflows" and commit the changes in this PR.' && exit 1) {%- endif %} {%- endfor %} python-opentelemetry-1.39.1/.github/workflows/templates/test.yml.j2000066400000000000000000000035401511654350100254320ustar00rootroot00000000000000# Do not edit this file. # This file is generated automatically by executing tox -e generate-workflows name: Test {{ file_number }} on: push: branches-ignore: - 'release/*' - 'otelbot/*' pull_request: permissions: contents: read concurrency: group: ${% raw %}{{ github.workflow }}-${{ github.head_ref || github.run_id }}{% endraw %} cancel-in-progress: true env: CORE_REPO_SHA: main # Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main' # For PRs you can change the inner fallback ('main') # For pushes you change the outer fallback ('main') # The logic below is used during releases and depends on having an equivalent branch name in the contrib repo. 
CONTRIB_REPO_SHA: {% raw %}${{ github.event_name == 'pull_request' && ( contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref || contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref || 'main' ) || 'main' }}{% endraw %} PIP_EXISTS_ACTION: w jobs: {%- for job_data in job_datas %} {{ job_data.name }}: name: {{ job_data.ui_name }} runs-on: {{ job_data.os }} timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${% raw %}{{ github.sha }}{% endraw %} uses: actions/checkout@v4 - name: Set up Python {{ job_data.python_version }} uses: actions/setup-python@v5 with: python-version: "{{ job_data.python_version }}" - name: Install tox run: pip install tox-uv {%- if job_data.os == "windows-latest" %} - name: Configure git to support long filenames run: git config --system core.longpaths true {%- endif %} - name: Run tests run: tox -e {{ job_data.tox_env }} -- -ra {%- endfor %} python-opentelemetry-1.39.1/.github/workflows/test_0.yml000066400000000000000000004322051511654350100233450ustar00rootroot00000000000000# Do not edit this file. # This file is generated automatically by executing tox -e generate-workflows name: Test 0 on: push: branches-ignore: - 'release/*' - 'otelbot/*' pull_request: permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true env: CORE_REPO_SHA: main # Set the SHA to the branch name if the PR has a label 'prepare-release' or 'backport' otherwise, set it to 'main' # For PRs you can change the inner fallback ('main') # For pushes you change the outer fallback ('main') # The logic below is used during releases and depends on having an equivalent branch name in the contrib repo. 
CONTRIB_REPO_SHA: ${{ github.event_name == 'pull_request' && ( contains(github.event.pull_request.labels.*.name, 'prepare-release') && github.event.pull_request.head.ref || contains(github.event.pull_request.labels.*.name, 'backport') && github.event.pull_request.base.ref || 'main' ) || 'main' }} PIP_EXISTS_ACTION: w jobs: py39-test-opentelemetry-api_ubuntu-latest: name: opentelemetry-api 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-api -- -ra py310-test-opentelemetry-api_ubuntu-latest: name: opentelemetry-api 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-api -- -ra py311-test-opentelemetry-api_ubuntu-latest: name: opentelemetry-api 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-api -- -ra py312-test-opentelemetry-api_ubuntu-latest: name: opentelemetry-api 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-api -- -ra py313-test-opentelemetry-api_ubuntu-latest: name: opentelemetry-api 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-api -- -ra pypy3-test-opentelemetry-api_ubuntu-latest: name: opentelemetry-api pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-api -- -ra py39-test-opentelemetry-proto-gen-oldest_ubuntu-latest: name: opentelemetry-proto-gen-oldest 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-proto-gen-oldest -- -ra py39-test-opentelemetry-proto-gen-latest_ubuntu-latest: name: opentelemetry-proto-gen-latest 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e 
py39-test-opentelemetry-proto-gen-latest -- -ra py310-test-opentelemetry-proto-gen-oldest_ubuntu-latest: name: opentelemetry-proto-gen-oldest 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-proto-gen-oldest -- -ra py310-test-opentelemetry-proto-gen-latest_ubuntu-latest: name: opentelemetry-proto-gen-latest 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-proto-gen-latest -- -ra py311-test-opentelemetry-proto-gen-oldest_ubuntu-latest: name: opentelemetry-proto-gen-oldest 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-proto-gen-oldest -- -ra py311-test-opentelemetry-proto-gen-latest_ubuntu-latest: name: opentelemetry-proto-gen-latest 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-proto-gen-latest -- -ra py312-test-opentelemetry-proto-gen-oldest_ubuntu-latest: name: opentelemetry-proto-gen-oldest 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-proto-gen-oldest -- -ra py312-test-opentelemetry-proto-gen-latest_ubuntu-latest: name: opentelemetry-proto-gen-latest 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-proto-gen-latest -- -ra py313-test-opentelemetry-proto-gen-oldest_ubuntu-latest: name: opentelemetry-proto-gen-oldest 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-proto-gen-oldest -- -ra py313-test-opentelemetry-proto-gen-latest_ubuntu-latest: name: opentelemetry-proto-gen-latest 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-proto-gen-latest 
-- -ra pypy3-test-opentelemetry-proto-gen-oldest_ubuntu-latest: name: opentelemetry-proto-gen-oldest pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-proto-gen-oldest -- -ra pypy3-test-opentelemetry-proto-gen-latest_ubuntu-latest: name: opentelemetry-proto-gen-latest pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-proto-gen-latest -- -ra py39-test-opentelemetry-sdk_ubuntu-latest: name: opentelemetry-sdk 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-sdk -- -ra py310-test-opentelemetry-sdk_ubuntu-latest: name: opentelemetry-sdk 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-sdk -- -ra py311-test-opentelemetry-sdk_ubuntu-latest: name: opentelemetry-sdk 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-sdk -- -ra py312-test-opentelemetry-sdk_ubuntu-latest: name: opentelemetry-sdk 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-sdk -- -ra py313-test-opentelemetry-sdk_ubuntu-latest: name: opentelemetry-sdk 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-sdk -- -ra pypy3-test-opentelemetry-sdk_ubuntu-latest: name: opentelemetry-sdk pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-sdk -- -ra py39-test-opentelemetry-semantic-conventions_ubuntu-latest: name: opentelemetry-semantic-conventions 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - 
name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-semantic-conventions -- -ra py310-test-opentelemetry-semantic-conventions_ubuntu-latest: name: opentelemetry-semantic-conventions 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-semantic-conventions -- -ra py311-test-opentelemetry-semantic-conventions_ubuntu-latest: name: opentelemetry-semantic-conventions 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-semantic-conventions -- -ra py312-test-opentelemetry-semantic-conventions_ubuntu-latest: name: opentelemetry-semantic-conventions 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-semantic-conventions -- -ra py313-test-opentelemetry-semantic-conventions_ubuntu-latest: name: opentelemetry-semantic-conventions 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-semantic-conventions -- -ra pypy3-test-opentelemetry-semantic-conventions_ubuntu-latest: name: opentelemetry-semantic-conventions pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-semantic-conventions -- -ra py39-test-opentelemetry-getting-started_ubuntu-latest: name: opentelemetry-getting-started 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-getting-started -- -ra py310-test-opentelemetry-getting-started_ubuntu-latest: name: opentelemetry-getting-started 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-getting-started -- -ra py311-test-opentelemetry-getting-started_ubuntu-latest: name: opentelemetry-getting-started 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: 
actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-getting-started -- -ra py312-test-opentelemetry-getting-started_ubuntu-latest: name: opentelemetry-getting-started 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-getting-started -- -ra py313-test-opentelemetry-getting-started_ubuntu-latest: name: opentelemetry-getting-started 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-getting-started -- -ra py39-test-opentelemetry-opentracing-shim_ubuntu-latest: name: opentelemetry-opentracing-shim 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-opentracing-shim -- -ra py310-test-opentelemetry-opentracing-shim_ubuntu-latest: name: opentelemetry-opentracing-shim 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-opentracing-shim -- -ra py311-test-opentelemetry-opentracing-shim_ubuntu-latest: name: opentelemetry-opentracing-shim 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-opentracing-shim -- -ra py312-test-opentelemetry-opentracing-shim_ubuntu-latest: name: opentelemetry-opentracing-shim 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-opentracing-shim -- -ra py313-test-opentelemetry-opentracing-shim_ubuntu-latest: name: opentelemetry-opentracing-shim 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-opentracing-shim -- -ra pypy3-test-opentelemetry-opentracing-shim_ubuntu-latest: name: opentelemetry-opentracing-shim pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 
uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-opentracing-shim -- -ra py39-test-opentelemetry-opencensus-shim_ubuntu-latest: name: opentelemetry-opencensus-shim 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-opencensus-shim -- -ra py310-test-opentelemetry-opencensus-shim_ubuntu-latest: name: opentelemetry-opencensus-shim 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-opencensus-shim -- -ra py311-test-opentelemetry-opencensus-shim_ubuntu-latest: name: opentelemetry-opencensus-shim 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-opencensus-shim -- -ra py312-test-opentelemetry-opencensus-shim_ubuntu-latest: name: opentelemetry-opencensus-shim 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-opencensus-shim -- -ra py313-test-opentelemetry-opencensus-shim_ubuntu-latest: name: opentelemetry-opencensus-shim 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-opencensus-shim -- -ra py39-test-opentelemetry-exporter-opencensus_ubuntu-latest: name: opentelemetry-exporter-opencensus 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-exporter-opencensus -- -ra py310-test-opentelemetry-exporter-opencensus_ubuntu-latest: name: opentelemetry-exporter-opencensus 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-exporter-opencensus -- -ra py311-test-opentelemetry-exporter-opencensus_ubuntu-latest: name: opentelemetry-exporter-opencensus 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: 
python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-exporter-opencensus -- -ra py312-test-opentelemetry-exporter-opencensus_ubuntu-latest: name: opentelemetry-exporter-opencensus 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-exporter-opencensus -- -ra py313-test-opentelemetry-exporter-opencensus_ubuntu-latest: name: opentelemetry-exporter-opencensus 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-exporter-opencensus -- -ra py39-test-opentelemetry-exporter-otlp-proto-common_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-common 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-exporter-otlp-proto-common -- -ra py310-test-opentelemetry-exporter-otlp-proto-common_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-common 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-exporter-otlp-proto-common -- -ra py311-test-opentelemetry-exporter-otlp-proto-common_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-common 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-exporter-otlp-proto-common -- -ra py312-test-opentelemetry-exporter-otlp-proto-common_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-common 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-exporter-otlp-proto-common -- -ra py313-test-opentelemetry-exporter-otlp-proto-common_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-common 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-exporter-otlp-proto-common -- -ra pypy3-test-opentelemetry-exporter-otlp-proto-common_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-common pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 
30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-otlp-proto-common -- -ra py39-test-opentelemetry-exporter-otlp-combined_ubuntu-latest: name: opentelemetry-exporter-otlp-combined 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-exporter-otlp-combined -- -ra py310-test-opentelemetry-exporter-otlp-combined_ubuntu-latest: name: opentelemetry-exporter-otlp-combined 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-exporter-otlp-combined -- -ra py311-test-opentelemetry-exporter-otlp-combined_ubuntu-latest: name: opentelemetry-exporter-otlp-combined 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-exporter-otlp-combined -- -ra py312-test-opentelemetry-exporter-otlp-combined_ubuntu-latest: name: opentelemetry-exporter-otlp-combined 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-exporter-otlp-combined -- -ra py313-test-opentelemetry-exporter-otlp-combined_ubuntu-latest: name: opentelemetry-exporter-otlp-combined 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-exporter-otlp-combined -- -ra py39-test-opentelemetry-exporter-otlp-proto-grpc-oldest_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-grpc-oldest 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra py39-test-opentelemetry-exporter-otlp-proto-grpc-latest_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-grpc-latest 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra 
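# Note: the "-oldest"/"-latest" job pairs here run the same package test suite
# twice per interpreter. Judging by the environment names alone (the pinning
# itself is defined in the tox configuration, not in this workflow), the two
# variants presumably resolve the package's dependencies to their oldest
# supported and newest available versions respectively, so that declared lower
# bounds stay tested. The variant is selected purely through the tox
# environment name, e.g.:
#
#   tox -e py39-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra
#   tox -e py39-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra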
py310-test-opentelemetry-exporter-otlp-proto-grpc-oldest_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-grpc-oldest 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra py310-test-opentelemetry-exporter-otlp-proto-grpc-latest_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-grpc-latest 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra py311-test-opentelemetry-exporter-otlp-proto-grpc-oldest_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-grpc-oldest 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra py311-test-opentelemetry-exporter-otlp-proto-grpc-latest_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-grpc-latest 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra py312-test-opentelemetry-exporter-otlp-proto-grpc-oldest_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-grpc-oldest 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra py312-test-opentelemetry-exporter-otlp-proto-grpc-latest_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-grpc-latest 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra py313-test-opentelemetry-exporter-otlp-proto-grpc-oldest_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-grpc-oldest 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra py313-test-opentelemetry-exporter-otlp-proto-grpc-latest_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-grpc-latest 3.13 Ubuntu runs-on: ubuntu-latest 
timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra py39-test-opentelemetry-exporter-otlp-proto-http_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-http 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-exporter-otlp-proto-http -- -ra py310-test-opentelemetry-exporter-otlp-proto-http_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-http 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-exporter-otlp-proto-http -- -ra py311-test-opentelemetry-exporter-otlp-proto-http_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-http 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-exporter-otlp-proto-http -- -ra py312-test-opentelemetry-exporter-otlp-proto-http_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-http 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-exporter-otlp-proto-http -- -ra py313-test-opentelemetry-exporter-otlp-proto-http_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-http 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-exporter-otlp-proto-http -- -ra pypy3-test-opentelemetry-exporter-otlp-proto-http_ubuntu-latest: name: opentelemetry-exporter-otlp-proto-http pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-otlp-proto-http -- -ra py39-test-opentelemetry-exporter-prometheus_ubuntu-latest: name: opentelemetry-exporter-prometheus 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-exporter-prometheus -- -ra 
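# Note: every job here maps one-to-one onto a tox environment, so any single
# CI cell can be reproduced locally with the same two commands the job runs
# (given a matching interpreter on PATH), for example:
#
#   pip install tox-uv
#   tox -e py312-test-opentelemetry-exporter-otlp-proto-http -- -ra
#
# The trailing "-- -ra" is passed through to the underlying pytest run and
# asks for a short summary of every non-passing outcome.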
py310-test-opentelemetry-exporter-prometheus_ubuntu-latest: name: opentelemetry-exporter-prometheus 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-exporter-prometheus -- -ra py311-test-opentelemetry-exporter-prometheus_ubuntu-latest: name: opentelemetry-exporter-prometheus 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-exporter-prometheus -- -ra py312-test-opentelemetry-exporter-prometheus_ubuntu-latest: name: opentelemetry-exporter-prometheus 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-exporter-prometheus -- -ra py313-test-opentelemetry-exporter-prometheus_ubuntu-latest: name: opentelemetry-exporter-prometheus 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-exporter-prometheus -- -ra pypy3-test-opentelemetry-exporter-prometheus_ubuntu-latest: name: opentelemetry-exporter-prometheus pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-prometheus -- -ra py39-test-opentelemetry-exporter-zipkin-combined_ubuntu-latest: name: opentelemetry-exporter-zipkin-combined 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-exporter-zipkin-combined -- -ra py310-test-opentelemetry-exporter-zipkin-combined_ubuntu-latest: name: opentelemetry-exporter-zipkin-combined 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-exporter-zipkin-combined -- -ra py311-test-opentelemetry-exporter-zipkin-combined_ubuntu-latest: name: opentelemetry-exporter-zipkin-combined 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - 
name: Run tests run: tox -e py311-test-opentelemetry-exporter-zipkin-combined -- -ra py312-test-opentelemetry-exporter-zipkin-combined_ubuntu-latest: name: opentelemetry-exporter-zipkin-combined 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-exporter-zipkin-combined -- -ra py313-test-opentelemetry-exporter-zipkin-combined_ubuntu-latest: name: opentelemetry-exporter-zipkin-combined 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-exporter-zipkin-combined -- -ra pypy3-test-opentelemetry-exporter-zipkin-combined_ubuntu-latest: name: opentelemetry-exporter-zipkin-combined pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-zipkin-combined -- -ra py39-test-opentelemetry-exporter-zipkin-proto-http_ubuntu-latest: name: opentelemetry-exporter-zipkin-proto-http 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-exporter-zipkin-proto-http -- -ra py310-test-opentelemetry-exporter-zipkin-proto-http_ubuntu-latest: name: opentelemetry-exporter-zipkin-proto-http 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-exporter-zipkin-proto-http -- -ra py311-test-opentelemetry-exporter-zipkin-proto-http_ubuntu-latest: name: opentelemetry-exporter-zipkin-proto-http 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-exporter-zipkin-proto-http -- -ra py312-test-opentelemetry-exporter-zipkin-proto-http_ubuntu-latest: name: opentelemetry-exporter-zipkin-proto-http 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-exporter-zipkin-proto-http -- -ra py313-test-opentelemetry-exporter-zipkin-proto-http_ubuntu-latest: name: opentelemetry-exporter-zipkin-proto-http 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo 
@ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-exporter-zipkin-proto-http -- -ra pypy3-test-opentelemetry-exporter-zipkin-proto-http_ubuntu-latest: name: opentelemetry-exporter-zipkin-proto-http pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-zipkin-proto-http -- -ra py39-test-opentelemetry-exporter-zipkin-json_ubuntu-latest: name: opentelemetry-exporter-zipkin-json 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-exporter-zipkin-json -- -ra py310-test-opentelemetry-exporter-zipkin-json_ubuntu-latest: name: opentelemetry-exporter-zipkin-json 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-exporter-zipkin-json -- -ra py311-test-opentelemetry-exporter-zipkin-json_ubuntu-latest: name: opentelemetry-exporter-zipkin-json 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-exporter-zipkin-json -- -ra py312-test-opentelemetry-exporter-zipkin-json_ubuntu-latest: name: opentelemetry-exporter-zipkin-json 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-exporter-zipkin-json -- -ra py313-test-opentelemetry-exporter-zipkin-json_ubuntu-latest: name: opentelemetry-exporter-zipkin-json 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-exporter-zipkin-json -- -ra pypy3-test-opentelemetry-exporter-zipkin-json_ubuntu-latest: name: opentelemetry-exporter-zipkin-json pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-zipkin-json -- -ra py39-test-opentelemetry-propagator-b3_ubuntu-latest: name: opentelemetry-propagator-b3 
3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-propagator-b3 -- -ra py310-test-opentelemetry-propagator-b3_ubuntu-latest: name: opentelemetry-propagator-b3 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-propagator-b3 -- -ra py311-test-opentelemetry-propagator-b3_ubuntu-latest: name: opentelemetry-propagator-b3 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-propagator-b3 -- -ra py312-test-opentelemetry-propagator-b3_ubuntu-latest: name: opentelemetry-propagator-b3 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-propagator-b3 -- -ra py313-test-opentelemetry-propagator-b3_ubuntu-latest: name: opentelemetry-propagator-b3 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-propagator-b3 -- -ra pypy3-test-opentelemetry-propagator-b3_ubuntu-latest: name: opentelemetry-propagator-b3 pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-propagator-b3 -- -ra py39-test-opentelemetry-propagator-jaeger_ubuntu-latest: name: opentelemetry-propagator-jaeger 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-propagator-jaeger -- -ra py310-test-opentelemetry-propagator-jaeger_ubuntu-latest: name: opentelemetry-propagator-jaeger 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-propagator-jaeger -- -ra py311-test-opentelemetry-propagator-jaeger_ubuntu-latest: name: opentelemetry-propagator-jaeger 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout 
repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-propagator-jaeger -- -ra py312-test-opentelemetry-propagator-jaeger_ubuntu-latest: name: opentelemetry-propagator-jaeger 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-propagator-jaeger -- -ra py313-test-opentelemetry-propagator-jaeger_ubuntu-latest: name: opentelemetry-propagator-jaeger 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-propagator-jaeger -- -ra pypy3-test-opentelemetry-propagator-jaeger_ubuntu-latest: name: opentelemetry-propagator-jaeger pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-propagator-jaeger -- -ra py39-test-opentelemetry-test-utils_ubuntu-latest: name: opentelemetry-test-utils 3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py39-test-opentelemetry-test-utils -- -ra py310-test-opentelemetry-test-utils_ubuntu-latest: name: opentelemetry-test-utils 3.10 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py310-test-opentelemetry-test-utils -- -ra py311-test-opentelemetry-test-utils_ubuntu-latest: name: opentelemetry-test-utils 3.11 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py311-test-opentelemetry-test-utils -- -ra py312-test-opentelemetry-test-utils_ubuntu-latest: name: opentelemetry-test-utils 3.12 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py312-test-opentelemetry-test-utils -- -ra py313-test-opentelemetry-test-utils_ubuntu-latest: name: opentelemetry-test-utils 3.13 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: 
actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e py313-test-opentelemetry-test-utils -- -ra pypy3-test-opentelemetry-test-utils_ubuntu-latest: name: opentelemetry-test-utils pypy-3.9 Ubuntu runs-on: ubuntu-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Run tests run: tox -e pypy3-test-opentelemetry-test-utils -- -ra py39-test-opentelemetry-api_windows-latest: name: opentelemetry-api 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-api -- -ra py310-test-opentelemetry-api_windows-latest: name: opentelemetry-api 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-api -- -ra py311-test-opentelemetry-api_windows-latest: name: opentelemetry-api 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-api -- -ra py312-test-opentelemetry-api_windows-latest: name: opentelemetry-api 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-api -- -ra py313-test-opentelemetry-api_windows-latest: name: opentelemetry-api 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-api -- -ra pypy3-test-opentelemetry-api_windows-latest: name: opentelemetry-api pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e 
pypy3-test-opentelemetry-api -- -ra py39-test-opentelemetry-proto-gen-oldest_windows-latest: name: opentelemetry-proto-gen-oldest 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-proto-gen-oldest -- -ra py39-test-opentelemetry-proto-gen-latest_windows-latest: name: opentelemetry-proto-gen-latest 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-proto-gen-latest -- -ra py310-test-opentelemetry-proto-gen-oldest_windows-latest: name: opentelemetry-proto-gen-oldest 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-proto-gen-oldest -- -ra py310-test-opentelemetry-proto-gen-latest_windows-latest: name: opentelemetry-proto-gen-latest 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-proto-gen-latest -- -ra py311-test-opentelemetry-proto-gen-oldest_windows-latest: name: opentelemetry-proto-gen-oldest 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-proto-gen-oldest -- -ra py311-test-opentelemetry-proto-gen-latest_windows-latest: name: opentelemetry-proto-gen-latest 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-proto-gen-latest -- -ra py312-test-opentelemetry-proto-gen-oldest_windows-latest: name: opentelemetry-proto-gen-oldest 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" 
- name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-proto-gen-oldest -- -ra py312-test-opentelemetry-proto-gen-latest_windows-latest: name: opentelemetry-proto-gen-latest 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-proto-gen-latest -- -ra py313-test-opentelemetry-proto-gen-oldest_windows-latest: name: opentelemetry-proto-gen-oldest 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-proto-gen-oldest -- -ra py313-test-opentelemetry-proto-gen-latest_windows-latest: name: opentelemetry-proto-gen-latest 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-proto-gen-latest -- -ra pypy3-test-opentelemetry-proto-gen-oldest_windows-latest: name: opentelemetry-proto-gen-oldest pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-proto-gen-oldest -- -ra pypy3-test-opentelemetry-proto-gen-latest_windows-latest: name: opentelemetry-proto-gen-latest pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-proto-gen-latest -- -ra py39-test-opentelemetry-sdk_windows-latest: name: opentelemetry-sdk 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-sdk -- -ra py310-test-opentelemetry-sdk_windows-latest: name: opentelemetry-sdk 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: 
Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-sdk -- -ra py311-test-opentelemetry-sdk_windows-latest: name: opentelemetry-sdk 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-sdk -- -ra py312-test-opentelemetry-sdk_windows-latest: name: opentelemetry-sdk 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-sdk -- -ra py313-test-opentelemetry-sdk_windows-latest: name: opentelemetry-sdk 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-sdk -- -ra pypy3-test-opentelemetry-sdk_windows-latest: name: opentelemetry-sdk pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-sdk -- -ra py39-test-opentelemetry-semantic-conventions_windows-latest: name: opentelemetry-semantic-conventions 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-semantic-conventions -- -ra py310-test-opentelemetry-semantic-conventions_windows-latest: name: opentelemetry-semantic-conventions 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-semantic-conventions -- -ra py311-test-opentelemetry-semantic-conventions_windows-latest: name: opentelemetry-semantic-conventions 3.11 
Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-semantic-conventions -- -ra py312-test-opentelemetry-semantic-conventions_windows-latest: name: opentelemetry-semantic-conventions 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-semantic-conventions -- -ra py313-test-opentelemetry-semantic-conventions_windows-latest: name: opentelemetry-semantic-conventions 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-semantic-conventions -- -ra pypy3-test-opentelemetry-semantic-conventions_windows-latest: name: opentelemetry-semantic-conventions pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-semantic-conventions -- -ra py39-test-opentelemetry-getting-started_windows-latest: name: opentelemetry-getting-started 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-getting-started -- -ra py310-test-opentelemetry-getting-started_windows-latest: name: opentelemetry-getting-started 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-getting-started -- -ra py311-test-opentelemetry-getting-started_windows-latest: name: opentelemetry-getting-started 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long 
filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-getting-started -- -ra py312-test-opentelemetry-getting-started_windows-latest: name: opentelemetry-getting-started 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-getting-started -- -ra py313-test-opentelemetry-getting-started_windows-latest: name: opentelemetry-getting-started 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-getting-started -- -ra py39-test-opentelemetry-opentracing-shim_windows-latest: name: opentelemetry-opentracing-shim 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-opentracing-shim -- -ra py310-test-opentelemetry-opentracing-shim_windows-latest: name: opentelemetry-opentracing-shim 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-opentracing-shim -- -ra py311-test-opentelemetry-opentracing-shim_windows-latest: name: opentelemetry-opentracing-shim 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-opentracing-shim -- -ra py312-test-opentelemetry-opentracing-shim_windows-latest: name: opentelemetry-opentracing-shim 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-opentracing-shim -- -ra py313-test-opentelemetry-opentracing-shim_windows-latest: name: opentelemetry-opentracing-shim 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: 
actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-opentracing-shim -- -ra pypy3-test-opentelemetry-opentracing-shim_windows-latest: name: opentelemetry-opentracing-shim pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-opentracing-shim -- -ra py39-test-opentelemetry-opencensus-shim_windows-latest: name: opentelemetry-opencensus-shim 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-opencensus-shim -- -ra py310-test-opentelemetry-opencensus-shim_windows-latest: name: opentelemetry-opencensus-shim 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-opencensus-shim -- -ra py311-test-opentelemetry-opencensus-shim_windows-latest: name: opentelemetry-opencensus-shim 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-opencensus-shim -- -ra py312-test-opentelemetry-opencensus-shim_windows-latest: name: opentelemetry-opencensus-shim 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-opencensus-shim -- -ra py313-test-opentelemetry-opencensus-shim_windows-latest: name: opentelemetry-opencensus-shim 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-opencensus-shim -- -ra 
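# ---------------------------------------------------------------------------
# Editor's note -- a readability sketch, not part of the generated matrix:
# every job in this workflow has the same five-step shape and differs only in
# the Python interpreter and the tox environment it targets. Re-indented, the
# job that ends just above looks like this:
#
#   py313-test-opentelemetry-opencensus-shim_windows-latest:
#     name: opentelemetry-opencensus-shim 3.13 Windows
#     runs-on: windows-latest
#     timeout-minutes: 30
#     steps:
#       - name: Checkout repo @ SHA - ${{ github.sha }}
#         uses: actions/checkout@v4
#       - name: Set up Python 3.13
#         uses: actions/setup-python@v5
#         with:
#           python-version: "3.13"
#       - name: Install tox
#         run: pip install tox-uv
#       - name: Configure git to support long filenames
#         run: git config --system core.longpaths true
#       - name: Run tests
#         run: tox -e py313-test-opentelemetry-opencensus-shim -- -ra
#
# Everything after "--" is forwarded by tox to the test command (pytest here),
# so "-ra" makes each run print a short summary of every non-passing test.
# Reproducing any single job locally amounts to:
#   pip install tox-uv && tox -e <env-name> -- -ra
# ---------------------------------------------------------------------------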
py39-test-opentelemetry-exporter-opencensus_windows-latest: name: opentelemetry-exporter-opencensus 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-exporter-opencensus -- -ra py310-test-opentelemetry-exporter-opencensus_windows-latest: name: opentelemetry-exporter-opencensus 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-exporter-opencensus -- -ra py311-test-opentelemetry-exporter-opencensus_windows-latest: name: opentelemetry-exporter-opencensus 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-exporter-opencensus -- -ra py312-test-opentelemetry-exporter-opencensus_windows-latest: name: opentelemetry-exporter-opencensus 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-exporter-opencensus -- -ra py313-test-opentelemetry-exporter-opencensus_windows-latest: name: opentelemetry-exporter-opencensus 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-exporter-opencensus -- -ra py39-test-opentelemetry-exporter-otlp-proto-common_windows-latest: name: opentelemetry-exporter-otlp-proto-common 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-exporter-otlp-proto-common -- -ra py310-test-opentelemetry-exporter-otlp-proto-common_windows-latest: name: opentelemetry-exporter-otlp-proto-common 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 
uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-exporter-otlp-proto-common -- -ra py311-test-opentelemetry-exporter-otlp-proto-common_windows-latest: name: opentelemetry-exporter-otlp-proto-common 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-exporter-otlp-proto-common -- -ra py312-test-opentelemetry-exporter-otlp-proto-common_windows-latest: name: opentelemetry-exporter-otlp-proto-common 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-exporter-otlp-proto-common -- -ra py313-test-opentelemetry-exporter-otlp-proto-common_windows-latest: name: opentelemetry-exporter-otlp-proto-common 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-exporter-otlp-proto-common -- -ra pypy3-test-opentelemetry-exporter-otlp-proto-common_windows-latest: name: opentelemetry-exporter-otlp-proto-common pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-otlp-proto-common -- -ra py39-test-opentelemetry-exporter-otlp-combined_windows-latest: name: opentelemetry-exporter-otlp-combined 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-exporter-otlp-combined -- -ra py310-test-opentelemetry-exporter-otlp-combined_windows-latest: name: opentelemetry-exporter-otlp-combined 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system 
core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-exporter-otlp-combined -- -ra py311-test-opentelemetry-exporter-otlp-combined_windows-latest: name: opentelemetry-exporter-otlp-combined 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-exporter-otlp-combined -- -ra py312-test-opentelemetry-exporter-otlp-combined_windows-latest: name: opentelemetry-exporter-otlp-combined 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-exporter-otlp-combined -- -ra py313-test-opentelemetry-exporter-otlp-combined_windows-latest: name: opentelemetry-exporter-otlp-combined 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-exporter-otlp-combined -- -ra py39-test-opentelemetry-exporter-otlp-proto-grpc-oldest_windows-latest: name: opentelemetry-exporter-otlp-proto-grpc-oldest 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra py39-test-opentelemetry-exporter-otlp-proto-grpc-latest_windows-latest: name: opentelemetry-exporter-otlp-proto-grpc-latest 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra py310-test-opentelemetry-exporter-otlp-proto-grpc-oldest_windows-latest: name: opentelemetry-exporter-otlp-proto-grpc-oldest 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra 
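# Editor's note -- an inference from the environment names, not documented in
# this file: opentelemetry-exporter-otlp-proto-grpc is the one package below
# tested in paired "-oldest"/"-latest" variants, which presumably exercise the
# suite against the minimum supported and the newest available grpc dependency
# pins, e.g.:
#
#   tox -e py310-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra
#   tox -e py310-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra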
py310-test-opentelemetry-exporter-otlp-proto-grpc-latest_windows-latest: name: opentelemetry-exporter-otlp-proto-grpc-latest 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra py311-test-opentelemetry-exporter-otlp-proto-grpc-oldest_windows-latest: name: opentelemetry-exporter-otlp-proto-grpc-oldest 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra py311-test-opentelemetry-exporter-otlp-proto-grpc-latest_windows-latest: name: opentelemetry-exporter-otlp-proto-grpc-latest 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra py312-test-opentelemetry-exporter-otlp-proto-grpc-oldest_windows-latest: name: opentelemetry-exporter-otlp-proto-grpc-oldest 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra py312-test-opentelemetry-exporter-otlp-proto-grpc-latest_windows-latest: name: opentelemetry-exporter-otlp-proto-grpc-latest 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra py313-test-opentelemetry-exporter-otlp-proto-grpc-oldest_windows-latest: name: opentelemetry-exporter-otlp-proto-grpc-oldest 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-exporter-otlp-proto-grpc-oldest -- -ra py313-test-opentelemetry-exporter-otlp-proto-grpc-latest_windows-latest: 
name: opentelemetry-exporter-otlp-proto-grpc-latest 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-exporter-otlp-proto-grpc-latest -- -ra py39-test-opentelemetry-exporter-otlp-proto-http_windows-latest: name: opentelemetry-exporter-otlp-proto-http 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-exporter-otlp-proto-http -- -ra py310-test-opentelemetry-exporter-otlp-proto-http_windows-latest: name: opentelemetry-exporter-otlp-proto-http 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-exporter-otlp-proto-http -- -ra py311-test-opentelemetry-exporter-otlp-proto-http_windows-latest: name: opentelemetry-exporter-otlp-proto-http 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-exporter-otlp-proto-http -- -ra py312-test-opentelemetry-exporter-otlp-proto-http_windows-latest: name: opentelemetry-exporter-otlp-proto-http 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-exporter-otlp-proto-http -- -ra py313-test-opentelemetry-exporter-otlp-proto-http_windows-latest: name: opentelemetry-exporter-otlp-proto-http 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-exporter-otlp-proto-http -- -ra pypy3-test-opentelemetry-exporter-otlp-proto-http_windows-latest: name: opentelemetry-exporter-otlp-proto-http pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - 
name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-otlp-proto-http -- -ra py39-test-opentelemetry-exporter-prometheus_windows-latest: name: opentelemetry-exporter-prometheus 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-exporter-prometheus -- -ra py310-test-opentelemetry-exporter-prometheus_windows-latest: name: opentelemetry-exporter-prometheus 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-exporter-prometheus -- -ra py311-test-opentelemetry-exporter-prometheus_windows-latest: name: opentelemetry-exporter-prometheus 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-exporter-prometheus -- -ra py312-test-opentelemetry-exporter-prometheus_windows-latest: name: opentelemetry-exporter-prometheus 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-exporter-prometheus -- -ra py313-test-opentelemetry-exporter-prometheus_windows-latest: name: opentelemetry-exporter-prometheus 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-exporter-prometheus -- -ra pypy3-test-opentelemetry-exporter-prometheus_windows-latest: name: opentelemetry-exporter-prometheus pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e 
pypy3-test-opentelemetry-exporter-prometheus -- -ra py39-test-opentelemetry-exporter-zipkin-combined_windows-latest: name: opentelemetry-exporter-zipkin-combined 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-exporter-zipkin-combined -- -ra py310-test-opentelemetry-exporter-zipkin-combined_windows-latest: name: opentelemetry-exporter-zipkin-combined 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-exporter-zipkin-combined -- -ra py311-test-opentelemetry-exporter-zipkin-combined_windows-latest: name: opentelemetry-exporter-zipkin-combined 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-exporter-zipkin-combined -- -ra py312-test-opentelemetry-exporter-zipkin-combined_windows-latest: name: opentelemetry-exporter-zipkin-combined 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-exporter-zipkin-combined -- -ra py313-test-opentelemetry-exporter-zipkin-combined_windows-latest: name: opentelemetry-exporter-zipkin-combined 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-exporter-zipkin-combined -- -ra pypy3-test-opentelemetry-exporter-zipkin-combined_windows-latest: name: opentelemetry-exporter-zipkin-combined pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-zipkin-combined -- -ra py39-test-opentelemetry-exporter-zipkin-proto-http_windows-latest: name: opentelemetry-exporter-zipkin-proto-http 3.9 Windows runs-on: 
windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-exporter-zipkin-proto-http -- -ra py310-test-opentelemetry-exporter-zipkin-proto-http_windows-latest: name: opentelemetry-exporter-zipkin-proto-http 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-exporter-zipkin-proto-http -- -ra py311-test-opentelemetry-exporter-zipkin-proto-http_windows-latest: name: opentelemetry-exporter-zipkin-proto-http 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-exporter-zipkin-proto-http -- -ra py312-test-opentelemetry-exporter-zipkin-proto-http_windows-latest: name: opentelemetry-exporter-zipkin-proto-http 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-exporter-zipkin-proto-http -- -ra py313-test-opentelemetry-exporter-zipkin-proto-http_windows-latest: name: opentelemetry-exporter-zipkin-proto-http 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-exporter-zipkin-proto-http -- -ra pypy3-test-opentelemetry-exporter-zipkin-proto-http_windows-latest: name: opentelemetry-exporter-zipkin-proto-http pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-zipkin-proto-http -- -ra py39-test-opentelemetry-exporter-zipkin-json_windows-latest: name: opentelemetry-exporter-zipkin-json 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: 
actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-exporter-zipkin-json -- -ra py310-test-opentelemetry-exporter-zipkin-json_windows-latest: name: opentelemetry-exporter-zipkin-json 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-exporter-zipkin-json -- -ra py311-test-opentelemetry-exporter-zipkin-json_windows-latest: name: opentelemetry-exporter-zipkin-json 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-exporter-zipkin-json -- -ra py312-test-opentelemetry-exporter-zipkin-json_windows-latest: name: opentelemetry-exporter-zipkin-json 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-exporter-zipkin-json -- -ra py313-test-opentelemetry-exporter-zipkin-json_windows-latest: name: opentelemetry-exporter-zipkin-json 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-exporter-zipkin-json -- -ra pypy3-test-opentelemetry-exporter-zipkin-json_windows-latest: name: opentelemetry-exporter-zipkin-json pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-exporter-zipkin-json -- -ra py39-test-opentelemetry-propagator-b3_windows-latest: name: opentelemetry-propagator-b3 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-propagator-b3 -- -ra 
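# Editor's note: the "Configure git to support long filenames" step that every
# one of these Windows jobs runs, i.e.
#
#   git config --system core.longpaths true
#
# works around the legacy 260-character MAX_PATH limit on Windows runners,
# where deeply nested package and test paths can otherwise fail to check out.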
py310-test-opentelemetry-propagator-b3_windows-latest: name: opentelemetry-propagator-b3 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-propagator-b3 -- -ra py311-test-opentelemetry-propagator-b3_windows-latest: name: opentelemetry-propagator-b3 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-propagator-b3 -- -ra py312-test-opentelemetry-propagator-b3_windows-latest: name: opentelemetry-propagator-b3 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-propagator-b3 -- -ra py313-test-opentelemetry-propagator-b3_windows-latest: name: opentelemetry-propagator-b3 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-propagator-b3 -- -ra pypy3-test-opentelemetry-propagator-b3_windows-latest: name: opentelemetry-propagator-b3 pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-propagator-b3 -- -ra py39-test-opentelemetry-propagator-jaeger_windows-latest: name: opentelemetry-propagator-jaeger 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-propagator-jaeger -- -ra py310-test-opentelemetry-propagator-jaeger_windows-latest: name: opentelemetry-propagator-jaeger 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: 
Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-propagator-jaeger -- -ra py311-test-opentelemetry-propagator-jaeger_windows-latest: name: opentelemetry-propagator-jaeger 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-propagator-jaeger -- -ra py312-test-opentelemetry-propagator-jaeger_windows-latest: name: opentelemetry-propagator-jaeger 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-propagator-jaeger -- -ra py313-test-opentelemetry-propagator-jaeger_windows-latest: name: opentelemetry-propagator-jaeger 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-propagator-jaeger -- -ra pypy3-test-opentelemetry-propagator-jaeger_windows-latest: name: opentelemetry-propagator-jaeger pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-propagator-jaeger -- -ra py39-test-opentelemetry-test-utils_windows-latest: name: opentelemetry-test-utils 3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.9 uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py39-test-opentelemetry-test-utils -- -ra py310-test-opentelemetry-test-utils_windows-latest: name: opentelemetry-test-utils 3.10 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py310-test-opentelemetry-test-utils -- -ra py311-test-opentelemetry-test-utils_windows-latest: name: opentelemetry-test-utils 3.11 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} 
uses: actions/checkout@v4 - name: Set up Python 3.11 uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py311-test-opentelemetry-test-utils -- -ra py312-test-opentelemetry-test-utils_windows-latest: name: opentelemetry-test-utils 3.12 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py312-test-opentelemetry-test-utils -- -ra py313-test-opentelemetry-test-utils_windows-latest: name: opentelemetry-test-utils 3.13 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python 3.13 uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e py313-test-opentelemetry-test-utils -- -ra pypy3-test-opentelemetry-test-utils_windows-latest: name: opentelemetry-test-utils pypy-3.9 Windows runs-on: windows-latest timeout-minutes: 30 steps: - name: Checkout repo @ SHA - ${{ github.sha }} uses: actions/checkout@v4 - name: Set up Python pypy-3.9 uses: actions/setup-python@v5 with: python-version: "pypy-3.9" - name: Install tox run: pip install tox-uv - name: Configure git to support long filenames run: git config --system core.longpaths true - name: Run tests run: tox -e pypy3-test-opentelemetry-test-utils -- -ra python-opentelemetry-1.39.1/.gitignore000066400000000000000000000011771511654350100200170ustar00rootroot00000000000000*.py[cod] *.sw[op] # C extensions *.so # Packages *.egg *.egg-info dist build eggs parts bin include var sdist develop-eggs .installed.cfg pyvenv.cfg lib share/ lib64 __pycache__ venv*/ .venv*/ # Installer logs pip-log.txt # Unit test / coverage reports coverage.xml .coverage .nox .tox .cache htmlcov # Translations *.mo # Mac .DS_Store # Mr Developer .mr.developer.cfg .project .pydevproject # JetBrains .idea # VSCode .vscode # Sphinx _build/ # mypy .mypy_cache/ target # Django example docs/examples/django/db.sqlite3 # Semantic conventions scripts/semconv/semantic-conventions # Benchmark result files *-benchmark.json python-opentelemetry-1.39.1/.pre-commit-config.yaml000066400000000000000000000011331511654350100223000ustar00rootroot00000000000000repos: - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. rev: v0.14.1 hooks: # Run the linter. - id: ruff args: ["--fix", "--show-fixes"] # Run the formatter. - id: ruff-format - repo: https://github.com/astral-sh/uv-pre-commit # uv version. rev: 0.6.0 hooks: - id: uv-lock - repo: https://github.com/rstcheck/rstcheck rev: 77490ffa33bfc0928975ae3cf904219903db755d # frozen: v6.2.5 hooks: - id: rstcheck additional_dependencies: ['rstcheck[sphinx]'] args: ["--report-level", "warning"] python-opentelemetry-1.39.1/.pylintrc000066400000000000000000000363771511654350100177060ustar00rootroot00000000000000[MASTER] # A comma-separated list of package or module names from where C extensions may # be loaded. 
Extensions are loaded into the active Python interpreter and may # run arbitrary code. extension-pkg-whitelist= # Add a list of files or directories to be excluded. They should be base names, not # paths. ignore=CVS,gen,proto # Add files or directories matching the regex patterns to be excluded. The # regex matches against base names, not paths. ignore-patterns= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the # number of processors available to use. jobs=0 # Control the amount of potential inferred values when inferring a single # object. This can help the performance when dealing with large functions or # complex, nested conditions. limit-inference-results=100 # List of plugins (as comma-separated values of python module names) to load, # usually to register additional checkers. load-plugins=pylint.extensions.no_self_use # Pickle collected data for later comparisons. persistent=yes # Specify a configuration file. #rcfile= # When enabled, pylint will attempt to guess common misconfigurations and emit # user-friendly hints instead of false-positive error messages. suggestion-mode=yes # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no # Run Python-version-dependent checks considering the baseline version py-version=3.9 [MESSAGES CONTROL] # Only show warnings with the listed confidence levels. Leave empty to show # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. confidence= # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration # file where it should appear only once). You can also use "--disable=all" to # disable everything first and then re-enable specific checks. For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use "--disable=all --enable=classes # --disable=W". disable=missing-docstring, fixme, # Warns about FIXME, TODO, etc. comments. too-few-public-methods, # Might be good to re-enable this later. too-many-instance-attributes, too-many-arguments, too-many-positional-arguments, duplicate-code, ungrouped-imports, # Leave this up to isort wrong-import-order, # Leave this up to isort line-too-long, # Leave this up to black exec-used, super-with-arguments, # temp-pylint-upgrade isinstance-second-argument-not-valid-type, # temp-pylint-upgrade raise-missing-from, # temp-pylint-upgrade unused-argument, # temp-pylint-upgrade redefined-builtin, cyclic-import, # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifiers separated by comma (,) or put this option # multiple times (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. # enable=c-extension-no-member [REPORTS] # Python expression which should return a score less than 10 (10 is the highest # score). You have access to the variables 'error', 'warning', and 'statement', which # respectively contain the number of error and warning messages and the total # number of statements analyzed. This is used by the global evaluation report # (RP0004).
#evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) # Template used to display messages. This is a python new-style format string # used to format the message information. See the docs for all details. #msg-template= # Set the output format. Available formats are text, parseable, colorized, json # and msvs (visual studio). You can also give a reporter class, e.g. # mypackage.mymodule.MyReporterClass. #output-format=text # Tells whether to display a full report or only the messages. #reports=no # Activate the evaluation score. score=yes [REFACTORING] # Maximum number of nested blocks for function / method body. max-nested-blocks=5 # Complete name of functions that never return. When checking for # inconsistent-return-statements, if a never-returning function is called then # it will be considered an explicit return statement and no message will be # printed. never-returning-functions=sys.exit [LOGGING] # Format style used to check logging format strings. `old` means using % # formatting, while `new` is for `{}` formatting. logging-format-style=old # Logging modules to check that the string format arguments are in logging # function parameter format. logging-modules=logging [SPELLING] # Limits the count of emitted suggestions for spelling mistakes. max-spelling-suggestions=4 # Spelling dictionary name. Available dictionaries: none. To make it work, # install the python-enchant package. spelling-dict= # List of comma-separated words that should not be checked. spelling-ignore-words= # A path to a file that contains the private dictionary; one word per line. spelling-private-dict-file= # Tells whether to store unknown words to the private dictionary indicated in # the --spelling-private-dict-file option instead of raising a message. spelling-store-unknown-words=no [MISCELLANEOUS] # List of note tags to take into consideration, separated by a comma. notes=FIXME, XXX, TODO [TYPECHECK] # List of decorators that produce context managers, such as # contextlib.contextmanager. Add to this list to register other decorators that # produce valid context managers. contextmanager-decorators=contextlib.contextmanager, _agnosticcontextmanager # List of members which are set dynamically and missed by the pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. generated-members=zipkin_pb2.* # Tells whether missing members accessed in a mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). #ignore-mixin-members=yes # Tells whether to warn about missing members when the owner of the attribute # is inferred to be None. #ignore-none=yes # This flag controls whether pylint should warn about no-member and similar # checks whenever an opaque object is returned when inferring. The inference # can return multiple potential results while evaluating a Python object, but # some branches might not be evaluated, which results in partial inference. In # that case, it might be useful to still emit no-member and other checks for # the rest of the inferred objects. #ignore-on-opaque-inference=yes # List of class names for which member attributes should not be checked (useful # for classes with dynamically set attributes). This supports the use of # qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local # List of module names for which member attributes should not be checked # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis). It # supports qualified module names, as well as Unix pattern matching. ignored-modules= # Show a hint with possible names when a member name was not found. The aspect # of finding the hint is based on edit distance. missing-member-hint=yes # The minimum edit distance a name should have in order to be considered a # similar match for a missing member name. missing-member-hint-distance=1 # The total number of similar names that should be taken into consideration when # showing a hint for a missing member. missing-member-max-choices=1 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid defining new builtins when possible. additional-builtins= # Tells whether unused global variables should be treated as a violation. allow-global-unused-variables=yes # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. callbacks=cb_, _cb # A regular expression matching the name of dummy variables (i.e. expected to # not be used). dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ # Argument names that match this expression will be ignored. Defaults to names # with a leading underscore. ignored-argument-names=_.*|^ignored_|^unused_|^kwargs|^args # Tells whether we should check for unused imports in __init__ files. init-import=no # List of qualified module names which can have objects that can redefine # builtins. redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io [FORMAT] # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. expected-line-ending-format=LF # Regexp for a line that is allowed to be longer than the limit. ignore-long-lines=^\s*(# )?<?https?://\S+>?$ # Number of spaces of indent required inside a hanging or continued line. indent-after-paren=4 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' # Maximum number of characters on a single line. max-line-length=79 # Maximum number of lines in a module. max-module-lines=1000 # Allow the body of a class to be on the same line as the declaration if body # contains single statement. single-line-class-stmt=no # Allow the body of an if to be on the same line as the test if there is no # else. single-line-if-stmt=no [SIMILARITIES] # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes # Ignore imports when computing similarities. ignore-imports=no # Minimum number of lines of a similarity. min-similarity-lines=4 [BASIC] # Naming style matching correct argument names. argument-naming-style=snake_case # Regular expression matching correct argument names. Overrides argument- # naming-style. #argument-rgx= # Naming style matching correct attribute names. attr-naming-style=snake_case # Regular expression matching correct attribute names. Overrides attr-naming- # style. #attr-rgx= # Bad variable names which should always be refused, separated by a comma. bad-names=foo, bar, baz, toto, tutu, tata # Naming style matching correct class attribute names. class-attribute-naming-style=any # Regular expression matching correct class attribute names.
# Regular expression matching correct class attribute names. Overrides
# class-attribute-naming-style.
#class-attribute-rgx=

# Naming style matching correct class names.
class-naming-style=PascalCase

# Regular expression matching correct class names. Overrides
# class-naming-style.
#class-rgx=

# Naming style matching correct constant names.
const-naming-style=any

# Regular expression matching correct constant names. Overrides
# const-naming-style.
#const-rgx=

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1

# Naming style matching correct function names.
function-naming-style=snake_case

# Regular expression matching correct function names. Overrides
# function-naming-style.
#function-rgx=

# Good variable names which should always be accepted, separated by a comma.
good-names=_,
           log,
           logger

# Include a hint for the correct naming format with invalid-name.
include-naming-hint=yes

# Naming style matching correct inline iteration names.
inlinevar-naming-style=any

# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style.
#inlinevar-rgx=

# Naming style matching correct method names.
method-naming-style=snake_case

# Regular expression matching correct method names. Overrides
# method-naming-style.
#method-rgx=

# Naming style matching correct module names.
module-naming-style=snake_case

# Regular expression matching correct module names. Overrides
# module-naming-style.
#module-rgx=

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# List of decorators that produce properties, such as abc.abstractproperty.
# Add to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty

# Naming style matching correct variable names.
variable-naming-style=snake_case

# Regular expression matching correct variable names. Overrides
# variable-naming-style.
variable-rgx=(([a-z_][a-z0-9_]{1,})|(_[a-z0-9_]*)|(__[a-z][a-z0-9_]+__))$


[IMPORTS]

# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no

# Analyse import fallback blocks. This can be used to support both Python 2
# and 3 compatible code, which means that the block might have code that
# exists only in one or another interpreter, leading to false positives when
# analysed.
analyse-fallback-blocks=yes

# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=optparse,tkinter.tix

# Create a graph of external dependencies in the given file (report RP0402
# must not be disabled).
ext-import-graph=

# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled).
import-graph=

# Create a graph of internal dependencies in the given file (report RP0402
# must not be disabled).
int-import-graph=

# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=six

# Force import order to recognize a module as part of a third party library.
known-third-party=enchant


[CLASSES]

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
                      __new__,
                      setUp

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
                  _fields,
                  _replace,
                  _source,
                  _make,
                  _Span

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=cls


[DESIGN]

# Maximum number of arguments for function / method.
max-args=5

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Maximum number of boolean expressions in an if statement.
max-bool-expr=5

# Maximum number of branches for function / method body.
max-branches=12

# Maximum number of locals for function / method body.
max-locals=15

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of return / yield for function / method body.
max-returns=6

# Maximum number of statements in function / method body.
max-statements=50

# Minimum number of public methods for a class (see R0903).
min-public-methods=2


[EXCEPTIONS]

# Exceptions that will emit a warning when being caught.
overgeneral-exceptions=builtins.Exception
python-opentelemetry-1.39.1/.readthedocs.yml000066400000000000000000000004311511654350100211050ustar00rootroot00000000000000
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

version: 2

build:
  os: "ubuntu-22.04"
  tools:
    python: "3.9"

sphinx:
  configuration: docs/conf.py

python:
  install:
    - requirements: docs-requirements.txt
python-opentelemetry-1.39.1/.rstcheck.cfg000066400000000000000000000001151511654350100203630ustar00rootroot00000000000000
[rstcheck]
ignore_directives = automodule
ignore_roles = scm_web,scm_raw_web
python-opentelemetry-1.39.1/CHANGELOG.md000066400000000000000000003333461511654350100176440ustar00rootroot00000000000000
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

**Breaking changes ongoing**

> [!IMPORTANT]
> We are working on stabilizing the Log signal, which will require making
> deprecations and breaking changes. We will try to reduce the releases that
> may require an update to your code, especially for instrumentations or for
> SDK developers.
## Version 1.39.1/0.60b1 (2025-12-11)

- Silence events API warnings for internal users
  ([#4847](https://github.com/open-telemetry/opentelemetry-python/pull/4847))

## Version 1.39.0/0.60b0 (2025-12-03)

- `opentelemetry-api`: Convert objects of any type other than AnyValue in
  attributes to string to be exportable
  ([#4808](https://github.com/open-telemetry/opentelemetry-python/pull/4808))
- docs: Added sqlcommenter example
  ([#4734](https://github.com/open-telemetry/opentelemetry-python/pull/4734))
- build: bump ruff to 0.14.1
  ([#4782](https://github.com/open-telemetry/opentelemetry-python/pull/4782))
- Add `opentelemetry-exporter-credential-provider-gcp` as an optional dependency
  to `opentelemetry-exporter-otlp-proto-grpc` and `opentelemetry-exporter-otlp-proto-http`
  ([#4760](https://github.com/open-telemetry/opentelemetry-python/pull/4760))
- semantic-conventions: Bump to 1.38.0
  ([#4791](https://github.com/open-telemetry/opentelemetry-python/pull/4791))
- [BREAKING] Remove LogData and extend SDK LogRecord to have instrumentation scope
  ([#4676](https://github.com/open-telemetry/opentelemetry-python/pull/4676))
- [BREAKING] Rename several classes from Log to LogRecord
  ([#4647](https://github.com/open-telemetry/opentelemetry-python/pull/4647))

  **Migration Guide:** `LogData` has been removed. Users should update their code as follows:

  - **For Log Exporters:** Change from `Sequence[LogData]` to `Sequence[ReadableLogRecord]`

    ```python
    # Before
    from opentelemetry.sdk._logs import LogData

    def export(self, batch: Sequence[LogData]) -> LogRecordExportResult:
        ...

    # After
    from opentelemetry.sdk._logs import ReadableLogRecord

    def export(self, batch: Sequence[ReadableLogRecord]) -> LogRecordExportResult:
        ...
    ```

  - **For Log Processors:** Use `ReadWriteLogRecord` for processing, `ReadableLogRecord` for exporting

    ```python
    # Before
    from opentelemetry.sdk._logs import LogData

    def on_emit(self, log_data: LogData):
        ...

    # After
    from opentelemetry.sdk._logs import ReadWriteLogRecord, ReadableLogRecord

    def on_emit(self, log_record: ReadWriteLogRecord):
        # Convert to ReadableLogRecord before exporting
        readable = ReadableLogRecord(
            log_record=log_record.log_record,
            resource=log_record.resource or Resource.create({}),
            instrumentation_scope=log_record.instrumentation_scope,
            limits=log_record.limits,
        )
        ...
    ```

  - **Accessing log data:** Use the same attributes on `ReadableLogRecord`/`ReadWriteLogRecord`
    - `log_record.log_record` - The API LogRecord (contains body, severity, attributes, etc.)
    - `log_record.resource` - The Resource
    - `log_record.instrumentation_scope` - The InstrumentationScope (now included, was in LogData before)
    - `log_record.limits` - The LogRecordLimits

- Mark the Events API/SDK as deprecated. The Logs API/SDK should be used
  instead; an event is now a `LogRecord` with the `event_name` field set (see
  the sketch below)
  ([#4654](https://github.com/open-telemetry/opentelemetry-python/pull/4654)).
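
  As a minimal sketch of the new model, assuming the 1.39 Logs API (the logger
  name, event name, and attributes below are illustrative, not part of this
  changelog):

  ```python
  from opentelemetry._logs import SeverityNumber, get_logger

  # Hypothetical instrumentation scope name, for illustration only.
  logger = get_logger("my-instrumentation")

  # Since 1.38.0, Logger.emit accepts separated keyword arguments; an event
  # is just a log record with event_name set.
  logger.emit(
      event_name="user.click",  # hypothetical event name
      body={"element_id": "submit"},
      severity_number=SeverityNumber.INFO,
  )
  ```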
- Fix type checking for built-in metric exporters
  ([#4820](https://github.com/open-telemetry/opentelemetry-python/pull/4820))

## Version 1.38.0/0.59b0 (2025-10-16)

- Add `rstcheck` to pre-commit to stop introducing invalid RST
  ([#4755](https://github.com/open-telemetry/opentelemetry-python/pull/4755))
- logs: extend Logger.emit to accept separated keyword arguments
  ([#4737](https://github.com/open-telemetry/opentelemetry-python/pull/4737))
- logs: add warnings for classes that would be deprecated and renamed in 1.39.0
  ([#4771](https://github.com/open-telemetry/opentelemetry-python/pull/4771))

## Version 1.37.0/0.58b0 (2025-09-11)

- Add experimental composite samplers
  ([#4714](https://github.com/open-telemetry/opentelemetry-python/pull/4714))
- Add new environment variables to the SDK,
  `OTEL_PYTHON_EXPORTER_OTLP_{HTTP/GRPC}_{METRICS/TRACES/LOGS}_CREDENTIAL_PROVIDER`,
  that can be used to inject a `requests.Session` or `grpc.ChannelCredentials`
  object into OTLP exporters created during auto instrumentation
  ([#4689](https://github.com/open-telemetry/opentelemetry-python/pull/4689)).
- Filter duplicate logs out of some internal `logger`'s logs on the log export
  path, which might otherwise log endlessly or cause a recursion-depth-exceeded
  error in cases where logging itself results in an exception
  ([#4695](https://github.com/open-telemetry/opentelemetry-python/pull/4695)).
- docs: linked the examples with their github source code location and added a
  Prometheus example
  ([#4728](https://github.com/open-telemetry/opentelemetry-python/pull/4728))
- Permit overriding the default HTTP OTLP exporter headers
  ([#4634](https://github.com/open-telemetry/opentelemetry-python/pull/4634))
- semantic-conventions: Bump to 1.37.0
  ([#4731](https://github.com/open-telemetry/opentelemetry-python/pull/4731))
- opentelemetry-sdk: fix handling of OTEL_ATTRIBUTE_COUNT_LIMIT in logs
  ([#4677](https://github.com/open-telemetry/opentelemetry-python/pull/4677))
- Performance: Cache `importlib_metadata.entry_points`
  ([#4735](https://github.com/open-telemetry/opentelemetry-python/pull/4735))
- opentelemetry-sdk: fix calling Logger.emit with an API LogRecord instance
  ([#4741](https://github.com/open-telemetry/opentelemetry-python/pull/4741))

## Version 1.36.0/0.57b0 (2025-07-29)

- Add missing Prometheus exporter documentation
  ([#4485](https://github.com/open-telemetry/opentelemetry-python/pull/4485))
- Overwrite logging.config.fileConfig and logging.config.dictConfig to ensure
  the OTLP `LogHandler` remains attached to the root logger. Fix a bug that can
  cause a deadlock to occur over `logging._lock` in some cases
  ([#4636](https://github.com/open-telemetry/opentelemetry-python/pull/4636)).
- otlp-http-exporter: set default value for param `timeout_sec` in `_export` method
  ([#4691](https://github.com/open-telemetry/opentelemetry-python/pull/4691))
- Update OTLP gRPC/HTTP exporters: calling shutdown will now interrupt exporters
  that are sleeping before a retry attempt, and cause them to return failure
  immediately. Update BatchSpan/LogRecordProcessors: shutdown will now complete
  after 30 seconds of trying to finish exporting any buffered telemetry, instead
  of continuing to export until all telemetry was exported
  ([#4638](https://github.com/open-telemetry/opentelemetry-python/pull/4638)).

## Version 1.35.0/0.56b0 (2025-07-11)

- Update OTLP proto to v1.7
  ([#4645](https://github.com/open-telemetry/opentelemetry-python/pull/4645)).
- Add `event_name` as a top level field in the `LogRecord`.
  Events are now simply logs with the `event_name` field set; the logs SDK
  should be used to emit events
  ([#4652](https://github.com/open-telemetry/opentelemetry-python/pull/4652)).
- Update OTLP gRPC/HTTP exporters: the export timeout is now inclusive of all
  retries and backoffs. A +/-20% jitter was added to all backoffs. A pointless
  32 second sleep that occurred after all retries had completed/failed was removed
  ([#4564](https://github.com/open-telemetry/opentelemetry-python/pull/4564)).
- Update ConsoleLogExporter.export to handle LogRecords containing bytes type
  in the body
  ([#4614](https://github.com/open-telemetry/opentelemetry-python/pull/4614/)).
- opentelemetry-sdk: Fix invalid `type: ignore` that causes mypy to ignore the whole file
  ([#4618](https://github.com/open-telemetry/opentelemetry-python/pull/4618))
- Add `span_exporter` property back to `BatchSpanProcessor` class
  ([#4621](https://github.com/open-telemetry/opentelemetry-python/pull/4621))
- Fix license field in pyproject.toml files
  ([#4625](https://github.com/open-telemetry/opentelemetry-python/pull/4625))
- Update logger level to NOTSET in logs example
  ([#4637](https://github.com/open-telemetry/opentelemetry-python/pull/4637))
- Logging API accepts optional `context`; deprecates `trace_id`, `span_id`, `trace_flags`
  ([#4597](https://github.com/open-telemetry/opentelemetry-python/pull/4597)) and
  ([#4668](https://github.com/open-telemetry/opentelemetry-python/pull/4668))
- sdk: use context instead of trace_id, span_id for initializing LogRecord
  ([#4653](https://github.com/open-telemetry/opentelemetry-python/pull/4653))
- Rename LogRecordProcessor.emit to on_emit
  ([#4648](https://github.com/open-telemetry/opentelemetry-python/pull/4648))
- Logging API: hide the std_to_otel function that converts Python logging
  severity to OTel severity
  ([#4649](https://github.com/open-telemetry/opentelemetry-python/pull/4649))
- proto: relax protobuf version requirement to support v6
  ([#4620](https://github.com/open-telemetry/opentelemetry-python/pull/4620))
- Bump semantic-conventions to 1.36.0
  ([#4669](https://github.com/open-telemetry/opentelemetry-python/pull/4669))
- Set expected User-Agent in HTTP headers for the gRPC OTLP exporter
  ([#4658](https://github.com/open-telemetry/opentelemetry-python/pull/4658))

## Version 1.34.0/0.55b0 (2025-06-04)

- typecheck: add sdk/resources and drop mypy
  ([#4578](https://github.com/open-telemetry/opentelemetry-python/pull/4578))
- Use PEP702 for marking deprecations
  ([#4522](https://github.com/open-telemetry/opentelemetry-python/pull/4522))
- Refactor `BatchLogRecordProcessor` and `BatchSpanProcessor` to simplify code
  and make the control flow more clear
  ([#4562](https://github.com/open-telemetry/opentelemetry-python/pull/4562),
  [#4535](https://github.com/open-telemetry/opentelemetry-python/pull/4535), and
  [#4580](https://github.com/open-telemetry/opentelemetry-python/pull/4580)).
- Remove log messages from `BatchLogRecordProcessor.emit`; this caused the
  program to crash at shutdown with a max recursion error
  ([#4586](https://github.com/open-telemetry/opentelemetry-python/pull/4586)).
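
  For reference, a minimal end-to-end wiring of the batch log pipeline touched
  by the entries above: a sketch assuming the public SDK surface of these
  releases, with `ConsoleLogExporter` chosen purely for illustration:

  ```python
  import logging

  from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
  from opentelemetry.sdk._logs.export import (
      BatchLogRecordProcessor,
      ConsoleLogExporter,
  )

  # Provider -> batch processor -> exporter.
  provider = LoggerProvider()
  provider.add_log_record_processor(
      BatchLogRecordProcessor(ConsoleLogExporter())
  )

  # The handler is typically attached to the root logger, so any logging done
  # inside the processor's own emit path re-enters this handler; that is why
  # the fix above removed log messages from BatchLogRecordProcessor.emit.
  logging.getLogger().addHandler(
      LoggingHandler(level=logging.NOTSET, logger_provider=provider)
  )

  logging.getLogger(__name__).warning("hello from the OTel log pipeline")
  ```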
- Configurable max retry timeout for grpc exporter
  ([#4333](https://github.com/open-telemetry/opentelemetry-python/pull/4333))
- opentelemetry-api: allow importlib-metadata 8.7.0
  ([#4593](https://github.com/open-telemetry/opentelemetry-python/pull/4593))
- opentelemetry-test-utils: assert explicit bucket boundaries in histogram metrics
  ([#4595](https://github.com/open-telemetry/opentelemetry-python/pull/4595))
- Bump semantic conventions to 1.34.0
  ([#4599](https://github.com/open-telemetry/opentelemetry-python/pull/4599))
- Drop support for Python 3.8
  ([#4520](https://github.com/open-telemetry/opentelemetry-python/pull/4520))

## Version 1.33.0/0.54b0 (2025-05-09)

- Fix intermittent `Connection aborted` error when using otlp/http exporters
  ([#4477](https://github.com/open-telemetry/opentelemetry-python/pull/4477))
- opentelemetry-sdk: use stable code attributes: `code.function` -> `code.function.name`,
  `code.lineno` -> `code.line.number`, `code.filepath` -> `code.file.path`
  ([#4508](https://github.com/open-telemetry/opentelemetry-python/pull/4508))
- Fix serialization of extended attributes for logs signal
  ([#4342](https://github.com/open-telemetry/opentelemetry-python/pull/4342))
- Handle `OTEL_PROPAGATORS` containing None
  ([#4553](https://github.com/open-telemetry/opentelemetry-python/pull/4553))
- docs: updated and added to the metrics and log examples
  ([#4559](https://github.com/open-telemetry/opentelemetry-python/pull/4559))
- Bump semantic conventions to 1.33.0
  ([#4567](https://github.com/open-telemetry/opentelemetry-python/pull/4567))

## Version 1.32.0/0.53b0 (2025-04-10)

- Fix user agent in OTLP HTTP metrics exporter
  ([#4475](https://github.com/open-telemetry/opentelemetry-python/pull/4475))
- Improve performance of baggage operations
  ([#4466](https://github.com/open-telemetry/opentelemetry-python/pull/4466))
- sdk: remove duplicated constant definitions for `environment_variables`
  ([#4491](https://github.com/open-telemetry/opentelemetry-python/pull/4491))
- api: Revert record `BaseException` change in `trace_api.use_span()`
  ([#4494](https://github.com/open-telemetry/opentelemetry-python/pull/4494))
- Improve CI by cancelling stale runs and setting timeouts
  ([#4498](https://github.com/open-telemetry/opentelemetry-python/pull/4498))
- Patch logging.basicConfig so OTel logs don't cause console logs to disappear
  ([#4436](https://github.com/open-telemetry/opentelemetry-python/pull/4436))
- Bump semantic conventions to 1.32.0
  ([#4530](https://github.com/open-telemetry/opentelemetry-python/pull/4530))
- Fix ExplicitBucketHistogramAggregation to handle multiple explicit bucket
  boundaries advisories
  ([#4521](https://github.com/open-telemetry/opentelemetry-python/pull/4521))
- opentelemetry-sdk: Fix serialization of objects in log handler
  ([#4528](https://github.com/open-telemetry/opentelemetry-python/pull/4528))

## Version 1.31.0/0.52b0 (2025-03-12)

- semantic-conventions: Bump to 1.31.0
  ([#4471](https://github.com/open-telemetry/opentelemetry-python/pull/4471))
- Add type annotations to context's attach & detach
  ([#4346](https://github.com/open-telemetry/opentelemetry-python/pull/4346))
- Fix OTLP encoders missing instrumentation scope schema url and attributes
  ([#4359](https://github.com/open-telemetry/opentelemetry-python/pull/4359))
- prometheus-exporter: fix labels out of place for data points with different
  attribute sets
  ([#4413](https://github.com/open-telemetry/opentelemetry-python/pull/4413))
- Type indent parameter in to_json
  ([#4402](https://github.com/open-telemetry/opentelemetry-python/pull/4402))
- Tolerate exceptions when loading resource detectors via `OTEL_EXPERIMENTAL_RESOURCE_DETECTORS`
  ([#4373](https://github.com/open-telemetry/opentelemetry-python/pull/4373))
- Disconnect gRPC client stub when shutting down `OTLPSpanExporter`
  ([#4370](https://github.com/open-telemetry/opentelemetry-python/pull/4370))
- opentelemetry-sdk: fix OTLP exporting of Histograms with explicit buckets advisory
  ([#4434](https://github.com/open-telemetry/opentelemetry-python/pull/4434))
- opentelemetry-exporter-otlp-proto-grpc: better dependency version range for Python 3.13
  ([#4444](https://github.com/open-telemetry/opentelemetry-python/pull/4444))
- opentelemetry-exporter-opencensus: better dependency version range for Python 3.13
  ([#4444](https://github.com/open-telemetry/opentelemetry-python/pull/4444))
- Updated `tracecontext-integration-test` gitref to `d782773b2cf2fa4afd6a80a93b289d8a74ca894d`
  ([#4448](https://github.com/open-telemetry/opentelemetry-python/pull/4448))
- Make `trace_api.use_span()` record `BaseException` as well as `Exception`
  ([#4406](https://github.com/open-telemetry/opentelemetry-python/pull/4406))
- Fix env var error message for TraceLimits/SpanLimits
  ([#4458](https://github.com/open-telemetry/opentelemetry-python/pull/4458))
- pylint-ci: updated python version to 3.13
  ([#4450](https://github.com/open-telemetry/opentelemetry-python/pull/4450))
- Fix memory leak in Log & Trace exporter
  ([#4449](https://github.com/open-telemetry/opentelemetry-python/pull/4449))

## Version 1.30.0/0.51b0 (2025-02-03)

- Always set up the logs SDK; OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED
  only controls python `logging` module handler setup
  ([#4340](https://github.com/open-telemetry/opentelemetry-python/pull/4340))
- Add `attributes` field in `metrics.get_meter` wrapper function
  ([#4364](https://github.com/open-telemetry/opentelemetry-python/pull/4364))
- Add Python 3.13 support
  ([#4353](https://github.com/open-telemetry/opentelemetry-python/pull/4353))
- sdk: don't log or print warnings when the SDK has been disabled
  ([#4371](https://github.com/open-telemetry/opentelemetry-python/pull/4371))
- Fix span context manager typing by using ParamSpec from typing_extensions
  ([#4389](https://github.com/open-telemetry/opentelemetry-python/pull/4389))
- Fix serialization of None values in logs body to match 1.31.0+ data model
  ([#4400](https://github.com/open-telemetry/opentelemetry-python/pull/4400))
- [BREAKING] semantic-conventions: Remove
  `opentelemetry.semconv.attributes.network_attributes.NETWORK_INTERFACE_NAME`
  introduced by mistake in the wrong module
  ([#4391](https://github.com/open-telemetry/opentelemetry-python/pull/4391))
- Add support for explicit bucket boundaries advisory for Histograms
  ([#4361](https://github.com/open-telemetry/opentelemetry-python/pull/4361))
- semantic-conventions: Bump to 1.30.0
  ([#4397](https://github.com/open-telemetry/opentelemetry-python/pull/4397))

## Version 1.29.0/0.50b0 (2024-12-11)

- Fix crash exporting a log record with None body
  ([#4276](https://github.com/open-telemetry/opentelemetry-python/pull/4276))
- Fix metrics export with exemplar and no context and filtering observable instruments
  ([#4251](https://github.com/open-telemetry/opentelemetry-python/pull/4251))
- Fix recursion error with sdk disabled and handler added to root logger
  ([#4259](https://github.com/open-telemetry/opentelemetry-python/pull/4259))
- sdk: setup EventLogger when OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED is set
  ([#4270](https://github.com/open-telemetry/opentelemetry-python/pull/4270))
- api: fix logging of duplicate EventLogger setup warning
  ([#4299](https://github.com/open-telemetry/opentelemetry-python/pull/4299))
- sdk: fix setting of process owner in ProcessResourceDetector
  ([#4311](https://github.com/open-telemetry/opentelemetry-python/pull/4311))
- sdk: fix serialization of logs severity_number field to int
  ([#4324](https://github.com/open-telemetry/opentelemetry-python/pull/4324))
- Remove `TestBase.assertEqualSpanInstrumentationInfo` method, use
  `assertEqualSpanInstrumentationScope` instead
  ([#4310](https://github.com/open-telemetry/opentelemetry-python/pull/4310))
- sdk: instantiate `ExemplarBucket`s lazily in `ExemplarReservoir`s
  ([#4260](https://github.com/open-telemetry/opentelemetry-python/pull/4260))
- semantic-conventions: Bump to 1.29.0
  ([#4337](https://github.com/open-telemetry/opentelemetry-python/pull/4337))

## Version 1.28.0/0.49b0 (2024-11-05)

- Removed superfluous py.typed markers and added them where they were missing
  ([#4172](https://github.com/open-telemetry/opentelemetry-python/pull/4172))
- Include metric info in encoding exceptions
  ([#4154](https://github.com/open-telemetry/opentelemetry-python/pull/4154))
- sdk: Add support for log formatting
  ([#4166](https://github.com/open-telemetry/opentelemetry-python/pull/4166))
- sdk: Add Host resource detector
  ([#4182](https://github.com/open-telemetry/opentelemetry-python/pull/4182))
- sdk: Implementation of exemplars
  ([#4094](https://github.com/open-telemetry/opentelemetry-python/pull/4094))
- Implement events sdk
  ([#4176](https://github.com/open-telemetry/opentelemetry-python/pull/4176))
- Update semantic conventions to version 1.28.0
  ([#4218](https://github.com/open-telemetry/opentelemetry-python/pull/4218))
- Add support for protobuf 5+ and drop support for protobuf 3 and 4
  ([#4206](https://github.com/open-telemetry/opentelemetry-python/pull/4206))
- Update environment variable descriptions to match signal
  ([#4222](https://github.com/open-telemetry/opentelemetry-python/pull/4222))
- Record logger name as the instrumentation scope name
  ([#4208](https://github.com/open-telemetry/opentelemetry-python/pull/4208))
- Fix memory leak in exporter and reader
  ([#4224](https://github.com/open-telemetry/opentelemetry-python/pull/4224))
- Drop `OTEL_PYTHON_EXPERIMENTAL_DISABLE_PROMETHEUS_UNIT_NORMALIZATION` environment variable
  ([#4217](https://github.com/open-telemetry/opentelemetry-python/pull/4217))
- Improve compatibility with other logging libraries that override
  `LogRecord.getMessage()` in order to customize message formatting
  ([#4216](https://github.com/open-telemetry/opentelemetry-python/pull/4216))

## Version 1.27.0/0.48b0 (2024-08-28)

- Implementation of Events API
  ([#4054](https://github.com/open-telemetry/opentelemetry-python/pull/4054))
- Make log sdk add `exception.message` to logRecord for exceptions whose
  argument is an exception, not a string message
  ([#4122](https://github.com/open-telemetry/opentelemetry-python/pull/4122))
- Fix use of `link.attributes.dropped`, which may not exist
  ([#4119](https://github.com/open-telemetry/opentelemetry-python/pull/4119))
- Running mypy on SDK resources
  ([#4053](https://github.com/open-telemetry/opentelemetry-python/pull/4053))
- Added py.typed file to top-level module
  ([#4084](https://github.com/open-telemetry/opentelemetry-python/pull/4084))
- Drop Final annotation from Enum in semantic conventions
  ([#4085](https://github.com/open-telemetry/opentelemetry-python/pull/4085))
- Update log export example to not use root logger
  ([#4090](https://github.com/open-telemetry/opentelemetry-python/pull/4090))
- sdk: Add OS resource detector
  ([#3992](https://github.com/open-telemetry/opentelemetry-python/pull/3992))
- sdk: Accept non URL-encoded headers in `OTEL_EXPORTER_OTLP_*HEADERS` to match
  other languages' SDKs
  ([#4103](https://github.com/open-telemetry/opentelemetry-python/pull/4103))
- Update semantic conventions to version 1.27.0
  ([#4104](https://github.com/open-telemetry/opentelemetry-python/pull/4104))
- Add support for type bytes in OTLP AnyValue
  ([#4128](https://github.com/open-telemetry/opentelemetry-python/pull/4128))
- Export ExponentialHistogram and ExponentialHistogramDataPoint
  ([#4134](https://github.com/open-telemetry/opentelemetry-python/pull/4134))
- Implement Client Key and Certificate File Support for All OTLP Exporters
  ([#4116](https://github.com/open-telemetry/opentelemetry-python/pull/4116))
- Remove `_start_time_unix_nano` attribute from `_ViewInstrumentMatch` in favor
  of using `time_ns()` at the moment when the aggregation object is created
  ([#4137](https://github.com/open-telemetry/opentelemetry-python/pull/4137))

## Version 1.26.0/0.47b0 (2024-07-25)

- Standardizing timeout calculation in measurement consumer collect to nanoseconds
  ([#4074](https://github.com/open-telemetry/opentelemetry-python/pull/4074))
- optional scope attributes for logger creation
  ([#4035](https://github.com/open-telemetry/opentelemetry-python/pull/4035))
- optional scope attribute for tracer creation
  ([#4028](https://github.com/open-telemetry/opentelemetry-python/pull/4028))
- Fix OTLP exporter encoding invalid span/trace IDs in the logs
  ([#4006](https://github.com/open-telemetry/opentelemetry-python/pull/4006))
- Update sdk process resource detector `process.command_args` attribute to also
  include the executable itself
  ([#4032](https://github.com/open-telemetry/opentelemetry-python/pull/4032))
- Fix `start_time_unix_nano` for delta collection for explicit bucket histogram aggregation
  ([#4009](https://github.com/open-telemetry/opentelemetry-python/pull/4009))
- Fix `start_time_unix_nano` for delta collection for sum aggregation
  ([#4011](https://github.com/open-telemetry/opentelemetry-python/pull/4011))
- Update opentracing and opencensus docs examples to not use JaegerExporter
  ([#4023](https://github.com/open-telemetry/opentelemetry-python/pull/4023))
- Do not execute Flask Tests in debug mode
  ([#3956](https://github.com/open-telemetry/opentelemetry-python/pull/3956))
- When encountering an error encoding metric attributes in the OTLP exporter, log the key that had an
  error
  ([#3838](https://github.com/open-telemetry/opentelemetry-python/pull/3838))
- Fix `ExponentialHistogramAggregation`
  ([#3978](https://github.com/open-telemetry/opentelemetry-python/pull/3978))
- Log a warning when a `LogRecord` in `sdk/log` has dropped attributes due to
  reaching limits
  ([#3946](https://github.com/open-telemetry/opentelemetry-python/pull/3946))
- Fix RandomIdGenerator so it cannot generate invalid Span/Trace Ids
  ([#3949](https://github.com/open-telemetry/opentelemetry-python/pull/3949))
- Add Python 3.12 to tox
  ([#3616](https://github.com/open-telemetry/opentelemetry-python/pull/3616))
- Improve resource field structure for LogRecords
  ([#3972](https://github.com/open-telemetry/opentelemetry-python/pull/3972))
- Update Semantic Conventions code generation scripts:
  - fix namespace exclusion that resulted in dropping `os` and `net` namespaces
  - add `Final` decorator to constants to prevent collisions
  - enable mypy and fix detected issues
  - allow to drop specific attributes in preparation for Semantic Conventions v1.26.0
  ([#3966](https://github.com/open-telemetry/opentelemetry-python/pull/3966))
- Update semantic conventions to version 1.26.0
  ([#3964](https://github.com/open-telemetry/opentelemetry-python/pull/3964))
- Use semconv exception attributes for record exceptions in spans
  ([#3979](https://github.com/open-telemetry/opentelemetry-python/pull/3979))
- Fix _encode_events assuming events.attributes.dropped exists
  ([#3965](https://github.com/open-telemetry/opentelemetry-python/pull/3965))
- Validate links at span creation
  ([#3991](https://github.com/open-telemetry/opentelemetry-python/pull/3991))
- Add attributes field in `MeterProvider.get_meter` and `InstrumentationScope`
  ([#4015](https://github.com/open-telemetry/opentelemetry-python/pull/4015))
- Fix inaccessible `SCHEMA_URL` constants in `opentelemetry-semantic-conventions`
  ([#4069](https://github.com/open-telemetry/opentelemetry-python/pull/4069))

## Version 1.25.0/0.46b0 (2024-05-30)

- Fix class BoundedAttributes to have RLock rather than Lock
  ([#3859](https://github.com/open-telemetry/opentelemetry-python/pull/3859))
- Remove thread lock by loading RuntimeContext explicitly
  ([#3763](https://github.com/open-telemetry/opentelemetry-python/pull/3763))
- Update proto version to v1.2.0
  ([#3844](https://github.com/open-telemetry/opentelemetry-python/pull/3844))
- Add to_json method to ExponentialHistogram
  ([#3780](https://github.com/open-telemetry/opentelemetry-python/pull/3780))
- Bump mypy to 1.9.0
  ([#3795](https://github.com/open-telemetry/opentelemetry-python/pull/3795))
- Fix exponential histograms
  ([#3798](https://github.com/open-telemetry/opentelemetry-python/pull/3798))
- Fix otlp exporter to export log_record.observed_timestamp
  ([#3785](https://github.com/open-telemetry/opentelemetry-python/pull/3785))
- Capture the fully qualified type name for raised exceptions in spans
  ([#3837](https://github.com/open-telemetry/opentelemetry-python/pull/3837))
- Prometheus exporter: sort label keys to prevent duplicate metrics when user
  input changes order
  ([#3698](https://github.com/open-telemetry/opentelemetry-python/pull/3698))
- Update semantic conventions to version 1.25.0. Refactor semantic-convention structure:
  - `SpanAttributes`, `ResourceAttributes`, and `MetricInstruments` are deprecated.
  - Attribute and metric definitions are now grouped by the namespace.
  - Stable attributes and metrics are moved to `opentelemetry.semconv.attributes`
    and `opentelemetry.semconv.metrics` modules.
  - Stable and experimental attributes and metrics are defined under the
    `opentelemetry.semconv._incubating` import path.
  ([#3586](https://github.com/open-telemetry/opentelemetry-python/pull/3586))
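
  A minimal sketch of the new import layout, assuming the 1.25.0 semconv
  package (the specific attribute modules shown are illustrative):

  ```python
  # Stable definitions now live under opentelemetry.semconv.attributes/metrics.
  from opentelemetry.semconv.attributes import server_attributes

  print(server_attributes.SERVER_ADDRESS)  # "server.address"

  # Experimental definitions live under the _incubating import path and may
  # change or be removed in later releases.
  from opentelemetry.semconv._incubating.attributes import process_attributes

  print(process_attributes.PROCESS_COMMAND_ARGS)  # "process.command_args"
  ```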
- Rename test objects to avoid pytest warnings
  ([#3823](https://github.com/open-telemetry/opentelemetry-python/pull/3823))
- Add span flags to OTLP spans and links
  ([#3881](https://github.com/open-telemetry/opentelemetry-python/pull/3881))
- Record links with invalid SpanContext if either attributes or TraceState are not empty
  ([#3917](https://github.com/open-telemetry/opentelemetry-python/pull/3917/))
- Add OpenTelemetry trove classifiers to PyPI packages
  ([#3913](https://github.com/open-telemetry/opentelemetry-python/pull/3913))
- Fix prometheus metric name and unit conversion
  ([#3924](https://github.com/open-telemetry/opentelemetry-python/pull/3924))
  - this is a breaking change to prometheus metric names so they comply with the
    [specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus)
  - you can temporarily opt out of the unit normalization by setting the environment
    variable `OTEL_PYTHON_EXPERIMENTAL_DISABLE_PROMETHEUS_UNIT_NORMALIZATION=true`
  - common unit abbreviations are converted to Prometheus conventions (`s` -> `seconds`),
    following the [collector's implementation](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/c0b51136575aa7ba89326d18edb4549e7e1bbdb9/pkg/translator/prometheus/normalize_name.go#L108)
  - repeated `_` are replaced with a single `_`
  - unit annotations (enclosed in curly braces like `{requests}`) are stripped away
  - units with a slash are converted, e.g. `m/s` -> `meters_per_second`
  - The exporter's API is not changed
- Add parameters for Distros and configurators to configure autoinstrumentation
  in addition to existing environment variables
  ([#3864](https://github.com/open-telemetry/opentelemetry-python/pull/3864))

## Version 1.24.0/0.45b0 (2024-03-28)

- Make create_gauge non-abstract method
  ([#3817](https://github.com/open-telemetry/opentelemetry-python/pull/3817))
- Make `tracer.start_as_current_span()` decorator work with async functions
  ([#3633](https://github.com/open-telemetry/opentelemetry-python/pull/3633))
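
  A minimal sketch of the decorator usage this enables (tracer and span names
  are illustrative):

  ```python
  import asyncio

  from opentelemetry import trace

  tracer = trace.get_tracer("example.instrumentation")

  @tracer.start_as_current_span("do-work")  # now also works on async defs
  async def do_work() -> None:
      await asyncio.sleep(0.1)

  asyncio.run(do_work())
  ```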
- Fix python 3.12 deprecation warning
  ([#3751](https://github.com/open-telemetry/opentelemetry-python/pull/3751))
- bump mypy to 0.982
  ([#3776](https://github.com/open-telemetry/opentelemetry-python/pull/3776))
- Add support for OTEL_SDK_DISABLED environment variable
  ([#3648](https://github.com/open-telemetry/opentelemetry-python/pull/3648))
- Fix ValueError message for PeriodicExportingMetricsReader
  ([#3769](https://github.com/open-telemetry/opentelemetry-python/pull/3769))
- Use `BaseException` instead of `Exception` in `record_exception`
  ([#3354](https://github.com/open-telemetry/opentelemetry-python/pull/3354))
- Make span.record_exception more robust
  ([#3778](https://github.com/open-telemetry/opentelemetry-python/pull/3778))
- Fix license field in pyproject.toml files
  ([#3803](https://github.com/open-telemetry/opentelemetry-python/pull/3803))

## Version 1.23.0/0.44b0 (2024-02-23)

- Use Attribute rather than BoundedAttributes in LogRecord
  ([#3567](https://github.com/open-telemetry/opentelemetry-python/pull/3567))
- Fix flush error when no LoggerProvider configured for LoggingHandler
  ([#3608](https://github.com/open-telemetry/opentelemetry-python/pull/3608))
- Add `Span.add_link()` method to add link after span start
  ([#3618](https://github.com/open-telemetry/opentelemetry-python/pull/3618))
- Fix `OTLPMetricExporter` ignoring the `preferred_aggregation` property
  ([#3603](https://github.com/open-telemetry/opentelemetry-python/pull/3603))
- Logs: set `observed_timestamp` field
  ([#3565](https://github.com/open-telemetry/opentelemetry-python/pull/3565))
- Add missing Resource SchemaURL in OTLP exporters
  ([#3652](https://github.com/open-telemetry/opentelemetry-python/pull/3652))
- Fix loglevel warning text
  ([#3566](https://github.com/open-telemetry/opentelemetry-python/pull/3566))
- Prometheus Exporter string representation for target_info labels
  ([#3659](https://github.com/open-telemetry/opentelemetry-python/pull/3659))
- Logs: ObservedTimestamp field is missing in console exporter output
  ([#3564](https://github.com/open-telemetry/opentelemetry-python/pull/3564))
- Fix explicit bucket histogram aggregation
  ([#3429](https://github.com/open-telemetry/opentelemetry-python/pull/3429))
- Add `code.lineno`, `code.function` and `code.filepath` to all logs
  ([#3675](https://github.com/open-telemetry/opentelemetry-python/pull/3675))
- Add Synchronous Gauge instrument
  ([#3462](https://github.com/open-telemetry/opentelemetry-python/pull/3462))
- Drop support for 3.7
  ([#3668](https://github.com/open-telemetry/opentelemetry-python/pull/3668))
- Include key in attribute sequence warning
  ([#3639](https://github.com/open-telemetry/opentelemetry-python/pull/3639))
- Upgrade markupsafe, Flask and related dependencies to dev and test environments
  ([#3609](https://github.com/open-telemetry/opentelemetry-python/pull/3609))
- Handle HTTP 2XX responses as successful in OTLP exporters
  ([#3623](https://github.com/open-telemetry/opentelemetry-python/pull/3623))
- Improve Resource Detector timeout messaging
  ([#3645](https://github.com/open-telemetry/opentelemetry-python/pull/3645))
- Add Proxy classes for logging
  ([#3575](https://github.com/open-telemetry/opentelemetry-python/pull/3575))
- Remove dependency on 'backoff' library
  ([#3679](https://github.com/open-telemetry/opentelemetry-python/pull/3679))

## Version 1.22.0/0.43b0 (2023-12-15)

- Prometheus exporter: sanitize info metric
  ([#3572](https://github.com/open-telemetry/opentelemetry-python/pull/3572))
- Remove Jaeger exporters
  ([#3554](https://github.com/open-telemetry/opentelemetry-python/pull/3554))
- Log stacktrace on `UNKNOWN` status OTLP export error
  ([#3536](https://github.com/open-telemetry/opentelemetry-python/pull/3536))
- Fix OTLPExporterMixin shutdown timeout period
  ([#3524](https://github.com/open-telemetry/opentelemetry-python/pull/3524))
- Handle `taskName` `logrecord` attribute
  ([#3557](https://github.com/open-telemetry/opentelemetry-python/pull/3557))

## Version 1.21.0/0.42b0 (2023-11-01)

- Fix `SumAggregation`
  ([#3390](https://github.com/open-telemetry/opentelemetry-python/pull/3390))
- Fix handling of empty metric collection cycles
  ([#3335](https://github.com/open-telemetry/opentelemetry-python/pull/3335))
- Fix error when no LoggerProvider configured for LoggingHandler
  ([#3423](https://github.com/open-telemetry/opentelemetry-python/pull/3423))
- Make `opentelemetry_metrics_exporter` entrypoint support pull exporters
  ([#3428](https://github.com/open-telemetry/opentelemetry-python/pull/3428))
- Allow instrument names to have '/' and up to 255 characters
  ([#3442](https://github.com/open-telemetry/opentelemetry-python/pull/3442))
- Do not load Resource on sdk import
  ([#3447](https://github.com/open-telemetry/opentelemetry-python/pull/3447))
- Update semantic conventions to version 1.21.0
  ([#3251](https://github.com/open-telemetry/opentelemetry-python/pull/3251))
- Add missing schema_url in global api for logging and metrics
  ([#3251](https://github.com/open-telemetry/opentelemetry-python/pull/3251))
- Prometheus exporter support for auto instrumentation
  ([#3413](https://github.com/open-telemetry/opentelemetry-python/pull/3413))
- Implement Process Resource detector
  ([#3472](https://github.com/open-telemetry/opentelemetry-python/pull/3472))

## Version 1.20.0/0.41b0 (2023-09-04)

- Modify Prometheus exporter to translate non-monotonic Sums into Gauges
  ([#3306](https://github.com/open-telemetry/opentelemetry-python/pull/3306))

## Version 1.19.0/0.40b0 (2023-07-13)

- Drop `setuptools` runtime requirement
  ([#3372](https://github.com/open-telemetry/opentelemetry-python/pull/3372))
- Update the body type in the log
  ([#3343](https://github.com/open-telemetry/opentelemetry-python/pull/3343))
- Add max_scale option to Exponential Bucket Histogram Aggregation
  ([#3323](https://github.com/open-telemetry/opentelemetry-python/pull/3323))
- Use BoundedAttributes instead of raw dict to extract attributes from LogRecord
  ([#3310](https://github.com/open-telemetry/opentelemetry-python/pull/3310))
- Support dropped_attributes_count in LogRecord and exporters
  ([#3351](https://github.com/open-telemetry/opentelemetry-python/pull/3351))
- Add unit to view instrument selection criteria
  ([#3341](https://github.com/open-telemetry/opentelemetry-python/pull/3341))
- Upgrade opentelemetry-proto to 0.20 and regen
  ([#3355](https://github.com/open-telemetry/opentelemetry-python/pull/3355))
- Include endpoint in gRPC transient error warning
  ([#3362](https://github.com/open-telemetry/opentelemetry-python/pull/3362))
- Fixed bug where logging export is tracked as trace
  ([#3375](https://github.com/open-telemetry/opentelemetry-python/pull/3375))
- Default LogRecord observed_timestamp to current timestamp
  ([#3377](https://github.com/open-telemetry/opentelemetry-python/pull/3377))

## Version 1.18.0/0.39b0 (2023-05-19)

- Select histogram aggregation with an environment variable
  ([#3265](https://github.com/open-telemetry/opentelemetry-python/pull/3265))
- Move Protobuf encoding to its own package
  ([#3169](https://github.com/open-telemetry/opentelemetry-python/pull/3169))
- Add experimental feature to detect resource detectors in auto instrumentation
  ([#3181](https://github.com/open-telemetry/opentelemetry-python/pull/3181))
- Fix exporting of ExponentialBucketHistogramAggregation from opentelemetry.sdk.metrics.view
  ([#3240](https://github.com/open-telemetry/opentelemetry-python/pull/3240))
- Fix headers types mismatch for OTLP Exporters
  ([#3226](https://github.com/open-telemetry/opentelemetry-python/pull/3226))
- Fix suppress instrumentation for log batch processor
  ([#3223](https://github.com/open-telemetry/opentelemetry-python/pull/3223))
- Add specced-out environment variables and arguments for BatchLogRecordProcessor
  ([#3237](https://github.com/open-telemetry/opentelemetry-python/pull/3237))
- Add benchmark tests for metrics
  ([#3267](https://github.com/open-telemetry/opentelemetry-python/pull/3267))

## Version 1.17.0/0.38b0 (2023-03-22)

- Implement LowMemory temporality
  ([#3223](https://github.com/open-telemetry/opentelemetry-python/pull/3223))
- PeriodicExportingMetricReader will continue if collection times out
  ([#3100](https://github.com/open-telemetry/opentelemetry-python/pull/3100))
- Fix formatting of ConsoleMetricExporter
  ([#3197](https://github.com/open-telemetry/opentelemetry-python/pull/3197))
- Fix use of built-in samplers in SDK configuration
  ([#3176](https://github.com/open-telemetry/opentelemetry-python/pull/3176))
- Implement shutdown procedure for OTLP grpc exporters
  ([#3138](https://github.com/open-telemetry/opentelemetry-python/pull/3138))
- Add exponential histogram
  ([#2964](https://github.com/open-telemetry/opentelemetry-python/pull/2964))
- Add OpenCensus trace bridge/shim
  ([#3210](https://github.com/open-telemetry/opentelemetry-python/pull/3210))

## Version 1.16.0/0.37b0 (2023-02-17)

- Change ``__all__`` to be statically defined
  ([#3143](https://github.com/open-telemetry/opentelemetry-python/pull/3143))
- Remove the ability to set a global metric prefix for Prometheus exporter
  ([#3137](https://github.com/open-telemetry/opentelemetry-python/pull/3137))
- Add environment variables for log exporter
  ([#3037](https://github.com/open-telemetry/opentelemetry-python/pull/3037))
- Add attribute name to type warning message
  ([#3124](https://github.com/open-telemetry/opentelemetry-python/pull/3124))
- Add db metric name to semantic conventions
  ([#3115](https://github.com/open-telemetry/opentelemetry-python/pull/3115))
- Fix User-Agent header value for OTLP exporters to conform to RFC7231 & RFC7230
  ([#3128](https://github.com/open-telemetry/opentelemetry-python/pull/3128))
- Fix validation of baggage values
  ([#3058](https://github.com/open-telemetry/opentelemetry-python/pull/3058))
- Fix capitalization of baggage keys
  ([#3151](https://github.com/open-telemetry/opentelemetry-python/pull/3151))
- Bump min required api version for OTLP exporters
  ([#3156](https://github.com/open-telemetry/opentelemetry-python/pull/3156))
- Deprecate Jaeger exporters
  ([#3158](https://github.com/open-telemetry/opentelemetry-python/pull/3158))
- Create a single resource instance
  ([#3118](https://github.com/open-telemetry/opentelemetry-python/pull/3118))

## Version 1.15.0/0.36b0 (2022-12-09)

- PeriodicExportingMetricsReader with +Inf interval to support explicit metric collection
  ([#3059](https://github.com/open-telemetry/opentelemetry-python/pull/3059))
- Regenerate opentelemetry-proto to be compatible with protobuf 3 and 4
  ([#3070](https://github.com/open-telemetry/opentelemetry-python/pull/3070))
- Rename parse_headers to parse_env_headers and improve error message
  ([#2376](https://github.com/open-telemetry/opentelemetry-python/pull/2376))
- URL-decode values from OTEL_RESOURCE_ATTRIBUTES
  ([#3046](https://github.com/open-telemetry/opentelemetry-python/pull/3046))
- Fixed circular dependency issue with custom samplers
  ([#3026](https://github.com/open-telemetry/opentelemetry-python/pull/3026))
- Add missing entry points for OTLP/HTTP exporter
  ([#3027](https://github.com/open-telemetry/opentelemetry-python/pull/3027))
- Update logging to include logging api as per specification
  ([#3038](https://github.com/open-telemetry/opentelemetry-python/pull/3038))
- Fix: Avoid generator in metrics _ViewInstrumentMatch.collect()
  ([#3035](https://github.com/open-telemetry/opentelemetry-python/pull/3035))
- [exporter-otlp-proto-grpc] add user agent string
  ([#3009](https://github.com/open-telemetry/opentelemetry-python/pull/3009))

## Version 1.14.0/0.35b0 (2022-11-04)

- Add logarithm and exponent mappings
  ([#2960](https://github.com/open-telemetry/opentelemetry-python/pull/2960))
- Add and use missing metrics environment variables
  ([#2968](https://github.com/open-telemetry/opentelemetry-python/pull/2968))
- Enabled custom samplers via entry points
  ([#2972](https://github.com/open-telemetry/opentelemetry-python/pull/2972))
- Update log symbol names
  ([#2943](https://github.com/open-telemetry/opentelemetry-python/pull/2943))
- Update explicit histogram bucket boundaries
  ([#2947](https://github.com/open-telemetry/opentelemetry-python/pull/2947))
- `exporter-otlp-proto-http`: add user agent string
  ([#2959](https://github.com/open-telemetry/opentelemetry-python/pull/2959))
- Add http-metric instrument names to semantic conventions
  ([#2976](https://github.com/open-telemetry/opentelemetry-python/pull/2976))
- [exporter/opentelemetry-exporter-otlp-proto-http] Add
  OTLPMetricExporter
  ([#2891](https://github.com/open-telemetry/opentelemetry-python/pull/2891))
- Add support for py3.11
  ([#2997](https://github.com/open-telemetry/opentelemetry-python/pull/2997))
- Fix a bug with exporter retries with newer versions of the backoff library
  ([#2980](https://github.com/open-telemetry/opentelemetry-python/pull/2980))

## Version 1.13.0/0.34b0 (2022-09-26)

- Add a configurable max_export_batch_size to the gRPC metrics exporter
  ([#2809](https://github.com/open-telemetry/opentelemetry-python/pull/2809))
- Remove support for 3.6
  ([#2763](https://github.com/open-telemetry/opentelemetry-python/pull/2763))
- Update PeriodicExportingMetricReader to never call export() concurrently
  ([#2873](https://github.com/open-telemetry/opentelemetry-python/pull/2873))
- Add param for `indent` size to `LogRecord.to_json()`
  ([#2870](https://github.com/open-telemetry/opentelemetry-python/pull/2870))
- Fix: Remove `LogEmitter.flush()` to align with OTel Log spec
  ([#2863](https://github.com/open-telemetry/opentelemetry-python/pull/2863))
- Bump minimum required API/SDK version for exporters that support metrics
  ([#2918](https://github.com/open-telemetry/opentelemetry-python/pull/2918))
- Fix metric reader examples and add `preferred_temporality` and
  `preferred_aggregation` for `ConsoleMetricExporter`
  ([#2911](https://github.com/open-telemetry/opentelemetry-python/pull/2911))
- Add support for setting OTLP export protocol with env vars, as defined in the
  [specifications](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#specify-protocol)
  ([#2893](https://github.com/open-telemetry/opentelemetry-python/pull/2893))
- Add force_flush to span exporters
  ([#2919](https://github.com/open-telemetry/opentelemetry-python/pull/2919))

## Version 1.12.0/0.33b0 (2022-08-08)

- Add `force_flush` method to metrics exporter
  ([#2852](https://github.com/open-telemetry/opentelemetry-python/pull/2852))
- Change tracing to use `Resource.to_json()`
  ([#2784](https://github.com/open-telemetry/opentelemetry-python/pull/2784))
- Fix get_log_emitter instrumenting_module_version args typo
  ([#2830](https://github.com/open-telemetry/opentelemetry-python/pull/2830))
- Fix OTLP gRPC exporter warning message
  ([#2781](https://github.com/open-telemetry/opentelemetry-python/pull/2781))
- Fix tracing decorator with late configuration
  ([#2754](https://github.com/open-telemetry/opentelemetry-python/pull/2754))
- Fix --insecure CLI argument
  ([#2696](https://github.com/open-telemetry/opentelemetry-python/pull/2696))
- Add temporality and aggregation configuration for metrics exporters, use
  `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` only for OTLP metrics exporter
  ([#2843](https://github.com/open-telemetry/opentelemetry-python/pull/2843))
- Instrument instances are always created through a Meter
  ([#2844](https://github.com/open-telemetry/opentelemetry-python/pull/2844))

## Version 1.12.0rc2/0.32b0 (2022-07-04)

- Fix instrument name and unit regexes
  ([#2796](https://github.com/open-telemetry/opentelemetry-python/pull/2796))
- Add optional sessions parameter to all Exporters leveraging requests.Session
  ([#2783](https://github.com/open-telemetry/opentelemetry-python/pull/2783))
- Add min/max fields to Histogram
  ([#2759](https://github.com/open-telemetry/opentelemetry-python/pull/2759))
- `opentelemetry-exporter-otlp-proto-http`: Add support for OTLP/HTTP log exporter
  ([#2462](https://github.com/open-telemetry/opentelemetry-python/pull/2462))
- Fix yield of
  `None`-valued points
  ([#2745](https://github.com/open-telemetry/opentelemetry-python/pull/2745))
- Add missing `to_json` methods
  ([#2722](https://github.com/open-telemetry/opentelemetry-python/pull/2722))
- Fix type hints for textmap `Getter` and `Setter`
  ([#2657](https://github.com/open-telemetry/opentelemetry-python/pull/2657))
- Fix LogEmitterProvider.force_flush hanging randomly
  ([#2714](https://github.com/open-telemetry/opentelemetry-python/pull/2714))
- narrow protobuf dependencies to exclude protobuf >= 4
  ([#2720](https://github.com/open-telemetry/opentelemetry-python/pull/2720))
- Specify worker thread names
  ([#2724](https://github.com/open-telemetry/opentelemetry-python/pull/2724))
- Loosen dependency on `backoff` for newer Python versions
  ([#2726](https://github.com/open-telemetry/opentelemetry-python/pull/2726))
- fix: frozenset object has no attribute items
  ([#2727](https://github.com/open-telemetry/opentelemetry-python/pull/2727))
- fix: create suppress HTTP instrumentation key in opentelemetry context
  ([#2729](https://github.com/open-telemetry/opentelemetry-python/pull/2729))
- Support logs SDK auto instrumentation enable/disable with env
  ([#2728](https://github.com/open-telemetry/opentelemetry-python/pull/2728))
- fix: update entry point object references for metrics
  ([#2731](https://github.com/open-telemetry/opentelemetry-python/pull/2731))
- Allow set_status to accept the StatusCode and optional description
  ([#2735](https://github.com/open-telemetry/opentelemetry-python/pull/2735))
- Configure auto instrumentation to support metrics
  ([#2705](https://github.com/open-telemetry/opentelemetry-python/pull/2705))
- Add entrypoint for metrics exporter
  ([#2748](https://github.com/open-telemetry/opentelemetry-python/pull/2748))
- Fix Jaeger propagator usage with NonRecordingSpan
  ([#2762](https://github.com/open-telemetry/opentelemetry-python/pull/2762))
- Add `opentelemetry.propagate` module and `opentelemetry.propagators` package
  to the API reference documentation
  ([#2785](https://github.com/open-telemetry/opentelemetry-python/pull/2785))

## Version 1.12.0rc1/0.31b0 (2022-05-17)

- Fix LoggingHandler to handle LogRecord with exc_info=False
  ([#2690](https://github.com/open-telemetry/opentelemetry-python/pull/2690))
- Make metrics components public
  ([#2684](https://github.com/open-telemetry/opentelemetry-python/pull/2684))
- Update to semantic conventions v1.11.0
  ([#2669](https://github.com/open-telemetry/opentelemetry-python/pull/2669))
- Update opentelemetry-proto to v0.17.0
  ([#2668](https://github.com/open-telemetry/opentelemetry-python/pull/2668))
- Add CallbackOptions to observable instrument callback params
  ([#2664](https://github.com/open-telemetry/opentelemetry-python/pull/2664))
- Add timeouts to metric SDK
  ([#2653](https://github.com/open-telemetry/opentelemetry-python/pull/2653))
- Add variadic arguments to metric exporter/reader interfaces
  ([#2654](https://github.com/open-telemetry/opentelemetry-python/pull/2654))
- Added an `opentelemetry.sdk.resources.ProcessResourceDetector` that adds the
  'process.runtime.{name,version,description}' resource attributes when used
  with the `opentelemetry.sdk.resources.get_aggregated_resources` API
  ([#2660](https://github.com/open-telemetry/opentelemetry-python/pull/2660))
- Move Metrics API behind internal package
  ([#2651](https://github.com/open-telemetry/opentelemetry-python/pull/2651))

## Version 1.11.1/0.30b1 (2022-04-21)

- Add parameter to MetricReader constructor to select aggregation per instrument kind
  ([#2638](https://github.com/open-telemetry/opentelemetry-python/pull/2638))
- Add parameter to MetricReader constructor to select temporality per instrument kind
  ([#2637](https://github.com/open-telemetry/opentelemetry-python/pull/2637))
- Fix unhandled callback exceptions on async instruments
  ([#2614](https://github.com/open-telemetry/opentelemetry-python/pull/2614))
- Rename `DefaultCounter`, `DefaultHistogram`, `DefaultObservableCounter`,
  `DefaultObservableGauge`, `DefaultObservableUpDownCounter`, `DefaultUpDownCounter`
  instruments to `NoOpCounter`, `NoOpHistogram`, `NoOpObservableCounter`,
  `NoOpObservableGauge`, `NoOpObservableUpDownCounter`, `NoOpUpDownCounter`
  ([#2616](https://github.com/open-telemetry/opentelemetry-python/pull/2616))
- Deprecate InstrumentationLibraryInfo and add InstrumentationScope
  ([#2583](https://github.com/open-telemetry/opentelemetry-python/pull/2583))

## Version 1.11.0/0.30b0 (2022-04-18)

- Rename API Measurement for async instruments to Observation
  ([#2617](https://github.com/open-telemetry/opentelemetry-python/pull/2617))
- Add support for zero or more callbacks
  ([#2602](https://github.com/open-telemetry/opentelemetry-python/pull/2602))
- Fix parsing of trace flags when extracting traceparent
  ([#2577](https://github.com/open-telemetry/opentelemetry-python/pull/2577))
- Add default aggregation
  ([#2543](https://github.com/open-telemetry/opentelemetry-python/pull/2543))
- Fix incorrect installation of some exporter “convenience” packages into
  “site-packages/src”
  ([#2525](https://github.com/open-telemetry/opentelemetry-python/pull/2525))
- Capture exception information as part of log attributes
  ([#2531](https://github.com/open-telemetry/opentelemetry-python/pull/2531))
- Change OTLPHandler to LoggingHandler
  ([#2528](https://github.com/open-telemetry/opentelemetry-python/pull/2528))
- Fix delta histogram sum not being reset on collection
  ([#2533](https://github.com/open-telemetry/opentelemetry-python/pull/2533))
- Add InMemoryMetricReader to metrics SDK
  ([#2540](https://github.com/open-telemetry/opentelemetry-python/pull/2540))
- Drop the usage of name field from log model in OTLP
  ([#2565](https://github.com/open-telemetry/opentelemetry-python/pull/2565))
- Update opentelemetry-proto to v0.15.0
  ([#2566](https://github.com/open-telemetry/opentelemetry-python/pull/2566))
- Remove `enable_default_view` option from sdk MeterProvider
  ([#2547](https://github.com/open-telemetry/opentelemetry-python/pull/2547))
- Update otlp-proto-grpc and otlp-proto-http exporters to have more lax
  requirements for `backoff` lib
  ([#2575](https://github.com/open-telemetry/opentelemetry-python/pull/2575))
- Add min/max to histogram point
  ([#2581](https://github.com/open-telemetry/opentelemetry-python/pull/2581))
- Update opentelemetry-proto to v0.16.0
  ([#2619](https://github.com/open-telemetry/opentelemetry-python/pull/2619))

## Version 1.10.0/0.29b0 (2022-03-10)

- Docs rework: [non-API docs are moving](https://github.com/open-telemetry/opentelemetry-python/issues/2172)
  to [opentelemetry.io](https://opentelemetry.io). For details, including a list
  of pages that have moved, see
  [#2453](https://github.com/open-telemetry/opentelemetry-python/pull/2453), and
  [#2498](https://github.com/open-telemetry/opentelemetry-python/pull/2498).
- `opentelemetry-exporter-otlp-proto-grpc` update SDK dependency to ~1.9
  ([#2442](https://github.com/open-telemetry/opentelemetry-python/pull/2442))
- bugfix(auto-instrumentation): attach OTLPHandler to root logger ([#2450](https://github.com/open-telemetry/opentelemetry-python/pull/2450))
- Bump semantic conventions from 1.6.1 to 1.8.0 ([#2461](https://github.com/open-telemetry/opentelemetry-python/pull/2461))
- fix exception handling in get_aggregated_resources ([#2464](https://github.com/open-telemetry/opentelemetry-python/pull/2464))
- Fix `OTEL_EXPORTER_OTLP_ENDPOINT` usage in OTLP HTTP trace exporter ([#2493](https://github.com/open-telemetry/opentelemetry-python/pull/2493))
- [exporter/opentelemetry-exporter-prometheus] restore package using the new metrics API ([#2321](https://github.com/open-telemetry/opentelemetry-python/pull/2321))

## Version 1.9.1/0.28b1 (2022-01-29)

- Update opentelemetry-proto to v0.12.0. Note that this update removes deprecated status codes. ([#2415](https://github.com/open-telemetry/opentelemetry-python/pull/2415))

## Version 1.9.0/0.28b0 (2022-01-26)

- Fix SpanLimits global span limit defaulting when set to 0 ([#2398](https://github.com/open-telemetry/opentelemetry-python/pull/2398))
- Add Python version support policy ([#2397](https://github.com/open-telemetry/opentelemetry-python/pull/2397))
- Decode URL-encoded headers in environment variables ([#2312](https://github.com/open-telemetry/opentelemetry-python/pull/2312))
- [exporter/opentelemetry-exporter-otlp-proto-grpc] Add OTLPMetricExporter ([#2323](https://github.com/open-telemetry/opentelemetry-python/pull/2323))
- Complete metric exporter format and update OTLP exporter ([#2364](https://github.com/open-telemetry/opentelemetry-python/pull/2364))
- [api] Add `NoOpTracer` and `NoOpTracerProvider`. Marking `_DefaultTracer` and `_DefaultTracerProvider` as deprecated. ([#2363](https://github.com/open-telemetry/opentelemetry-python/pull/2363))
- [exporter/opentelemetry-exporter-otlp-proto-grpc] Add Sum to OTLPMetricExporter ([#2370](https://github.com/open-telemetry/opentelemetry-python/pull/2370))
- [api] Rename `_DefaultMeter` and `_DefaultMeterProvider` to `NoOpMeter` and `NoOpMeterProvider`.
  ([#2383](https://github.com/open-telemetry/opentelemetry-python/pull/2383))
- [exporter/opentelemetry-exporter-otlp-proto-grpc] Add Gauge to OTLPMetricExporter ([#2408](https://github.com/open-telemetry/opentelemetry-python/pull/2408))
- [logs] prevent None from causing problems ([#2410](https://github.com/open-telemetry/opentelemetry-python/pull/2410))

## Version 1.8.0/0.27b0 (2021-12-17)

- Adds Aggregation and instruments as part of Metrics SDK ([#2234](https://github.com/open-telemetry/opentelemetry-python/pull/2234))
- Update visibility of OTEL_METRICS_EXPORTER environment variable ([#2303](https://github.com/open-telemetry/opentelemetry-python/pull/2303))
- Adding entrypoints for log emitter provider and console, otlp log exporters ([#2253](https://github.com/open-telemetry/opentelemetry-python/pull/2253))
- Rename ConsoleExporter to ConsoleLogExporter ([#2307](https://github.com/open-telemetry/opentelemetry-python/pull/2307))
- Adding OTEL_LOGS_EXPORTER environment variable ([#2320](https://github.com/open-telemetry/opentelemetry-python/pull/2320))
- Add `setuptools` to `install_requires` ([#2334](https://github.com/open-telemetry/opentelemetry-python/pull/2334))
- Add otlp entrypoint for log exporter ([#2322](https://github.com/open-telemetry/opentelemetry-python/pull/2322))
- Support insecure configuration for OTLP gRPC exporter ([#2350](https://github.com/open-telemetry/opentelemetry-python/pull/2350))

## Version 1.7.1/0.26b1 (2021-11-11)

- Add support for Python 3.10 ([#2207](https://github.com/open-telemetry/opentelemetry-python/pull/2207))
- remove `X-B3-ParentSpanId` for B3 propagator as per OpenTelemetry specification ([#2237](https://github.com/open-telemetry/opentelemetry-python/pull/2237))
- Populate `auto.version` in Resource if using auto-instrumentation ([#2243](https://github.com/open-telemetry/opentelemetry-python/pull/2243))
- Return proxy instruments from ProxyMeter ([#2169](https://github.com/open-telemetry/opentelemetry-python/pull/2169))
- Make Measurement a concrete class ([#2153](https://github.com/open-telemetry/opentelemetry-python/pull/2153))
- Add metrics API ([#1887](https://github.com/open-telemetry/opentelemetry-python/pull/1887))
- Make batch processor fork aware and reinit when needed ([#2242](https://github.com/open-telemetry/opentelemetry-python/pull/2242))
- `opentelemetry-sdk` Sanitize env var resource attribute pairs ([#2256](https://github.com/open-telemetry/opentelemetry-python/pull/2256))
- `opentelemetry-test` start releasing to pypi.org ([#2269](https://github.com/open-telemetry/opentelemetry-python/pull/2269))

## Version 1.6.2/0.25b2 (2021-10-19)

- Fix parental trace relationship for opentracing `follows_from` reference ([#2180](https://github.com/open-telemetry/opentelemetry-python/pull/2180))

## Version 1.6.1/0.25b1 (2021-10-18)

- Fix ReadableSpan property types attempting to create a mapping from a list ([#2215](https://github.com/open-telemetry/opentelemetry-python/pull/2215))
- Upgrade GRPC/protobuf related dependency and regenerate otlp protobufs ([#2201](https://github.com/open-telemetry/opentelemetry-python/pull/2201))
- Propagation: only warn about oversized baggage headers when headers exist ([#2212](https://github.com/open-telemetry/opentelemetry-python/pull/2212))

## Version 1.6.0/0.25b0 (2021-10-13)

- Fix race in `set_tracer_provider()` ([#2182](https://github.com/open-telemetry/opentelemetry-python/pull/2182))
- Automatically load OTEL environment variables as options for `opentelemetry-instrument`
  ([#1969](https://github.com/open-telemetry/opentelemetry-python/pull/1969))
- `opentelemetry-semantic-conventions` Update to semantic conventions v1.6.1 ([#2077](https://github.com/open-telemetry/opentelemetry-python/pull/2077))
- Do not count invalid attributes for dropped ([#2096](https://github.com/open-telemetry/opentelemetry-python/pull/2096))
- Fix propagation bug caused by counting skipped entries ([#2071](https://github.com/open-telemetry/opentelemetry-python/pull/2071))
- Add entry point for exporters with default protocol ([#2093](https://github.com/open-telemetry/opentelemetry-python/pull/2093))
- Renamed entrypoints `otlp_proto_http_span`, `otlp_proto_grpc_span`, `console_span` to remove redundant `_span` suffix. ([#2093](https://github.com/open-telemetry/opentelemetry-python/pull/2093))
- Do not skip sequence attribute on decode error ([#2097](https://github.com/open-telemetry/opentelemetry-python/pull/2097))
- `opentelemetry-test`: Add `HttpTestBase` to allow tests with actual TCP sockets ([#2101](https://github.com/open-telemetry/opentelemetry-python/pull/2101))
- Fix incorrect headers parsing via environment variables ([#2103](https://github.com/open-telemetry/opentelemetry-python/pull/2103))
- Add support for OTEL_ATTRIBUTE_COUNT_LIMIT ([#2139](https://github.com/open-telemetry/opentelemetry-python/pull/2139))
- Attribute limits no longer apply to Resource attributes ([#2138](https://github.com/open-telemetry/opentelemetry-python/pull/2138))
- `opentelemetry-exporter-otlp`: Add `opentelemetry-otlp-proto-http` as dependency ([#2147](https://github.com/open-telemetry/opentelemetry-python/pull/2147))
- Fix validity calculation for trace and span IDs ([#2145](https://github.com/open-telemetry/opentelemetry-python/pull/2145))
- Add `schema_url` to `TracerProvider.get_tracer` ([#2154](https://github.com/open-telemetry/opentelemetry-python/pull/2154))
- Make baggage implementation W3C spec compliant ([#2167](https://github.com/open-telemetry/opentelemetry-python/pull/2167))
- Add name to `BatchSpanProcessor` worker thread ([#2186](https://github.com/open-telemetry/opentelemetry-python/pull/2186))

## Version 1.5.0/0.24b0 (2021-08-26)

- Add pre and post instrumentation entry points ([#1983](https://github.com/open-telemetry/opentelemetry-python/pull/1983))
- Fix documentation on well known exporters and variable OTEL_TRACES_EXPORTER which were misnamed ([#2023](https://github.com/open-telemetry/opentelemetry-python/pull/2023))
- `opentelemetry-sdk` `get_aggregated_resource()` returns default resource and service name whenever called ([#2013](https://github.com/open-telemetry/opentelemetry-python/pull/2013))
- `opentelemetry-distro` & `opentelemetry-sdk` Moved Auto Instrumentation Configurator code to SDK to let distros use its default implementation ([#1937](https://github.com/open-telemetry/opentelemetry-python/pull/1937))
- Add Trace ID validation to meet [TraceID spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/overview.md#spancontext) ([#1992](https://github.com/open-telemetry/opentelemetry-python/pull/1992))
- Fixed Python 3.10 incompatibility in `opentelemetry-opentracing-shim` tests ([#2018](https://github.com/open-telemetry/opentelemetry-python/pull/2018))
- `opentelemetry-sdk` added support for `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` ([#2044](https://github.com/open-telemetry/opentelemetry-python/pull/2044))
- `opentelemetry-sdk` Fixed bugs (#2041, #2042 & #2045) in Span Limits
  ([#2044](https://github.com/open-telemetry/opentelemetry-python/pull/2044))
- `opentelemetry-sdk` Add support for `OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT` env var ([#2056](https://github.com/open-telemetry/opentelemetry-python/pull/2056))
- `opentelemetry-sdk` Treat limit env vars set to empty values as unset/unlimited. ([#2054](https://github.com/open-telemetry/opentelemetry-python/pull/2054))
- `opentelemetry-api` Attribute keys must be non-empty strings. ([#2057](https://github.com/open-telemetry/opentelemetry-python/pull/2057))

## Version 0.23.1 (2021-07-26)

### Changed

- Fix opentelemetry-bootstrap dependency script. ([#1987](https://github.com/open-telemetry/opentelemetry-python/pull/1987))

## Version 1.4.0/0.23b0 (2021-07-21)

### Added

- Moved `opentelemetry-instrumentation` to core repository. ([#1959](https://github.com/open-telemetry/opentelemetry-python/pull/1959))
- Add support for OTLP Exporter Protobuf over HTTP ([#1868](https://github.com/open-telemetry/opentelemetry-python/pull/1868))
- Dropped attributes/events/links counts are now exposed on ReadableSpans. ([#1893](https://github.com/open-telemetry/opentelemetry-python/pull/1893))
- Added dropped count to otlp, jaeger and zipkin exporters. ([#1893](https://github.com/open-telemetry/opentelemetry-python/pull/1893))
- Give OTLPHandler the ability to process attributes ([#1952](https://github.com/open-telemetry/opentelemetry-python/pull/1952))
- Add global LogEmitterProvider and convenience function get_log_emitter ([#1901](https://github.com/open-telemetry/opentelemetry-python/pull/1901))
- Add OTLPHandler for standard library logging module ([#1903](https://github.com/open-telemetry/opentelemetry-python/pull/1903))

### Changed

- Updated `opentelemetry-opencensus-exporter` to use `service_name` of spans instead of resource ([#1897](https://github.com/open-telemetry/opentelemetry-python/pull/1897))
- Added descriptions to the env variables mentioned in the opentelemetry-specification ([#1898](https://github.com/open-telemetry/opentelemetry-python/pull/1898))
- Ignore calls to `Span.set_status` with `StatusCode.UNSET` and also if previous status already had `StatusCode.OK`. ([#1902](https://github.com/open-telemetry/opentelemetry-python/pull/1902))
- Attributes for `Link` and `Resource` are immutable as they are for `Event`, which means any attempt to modify attributes directly will result in a `TypeError` exception. ([#1909](https://github.com/open-telemetry/opentelemetry-python/pull/1909))
- Added `BoundedAttributes` to the API to make it available for `Link` which is defined in the API. Marked `BoundedDict` in the SDK as deprecated as a result. ([#1915](https://github.com/open-telemetry/opentelemetry-python/pull/1915))
- Fix OTLP SpanExporter to distinguish spans based off Resource and InstrumentationInfo ([#1927](https://github.com/open-telemetry/opentelemetry-python/pull/1927))
- Updating dependency for opentelemetry api/sdk packages to support major version instead of pinning to specific versions. ([#1933](https://github.com/open-telemetry/opentelemetry-python/pull/1933))
- `opentelemetry-semantic-conventions` Generate semconv constants update for OTel Spec 1.5.0 ([#1946](https://github.com/open-telemetry/opentelemetry-python/pull/1946))

### Fixed

- Updated `opentelemetry-opentracing-shim` `ScopeShim` to report exceptions in opentelemetry specification format, rather than opentracing spec format.
  ([#1878](https://github.com/open-telemetry/opentelemetry-python/pull/1878))

## Version 1.3.0/0.22b0 (2021-06-01)

### Added

- Allow span limits to be set programmatically via TracerProvider. ([#1877](https://github.com/open-telemetry/opentelemetry-python/pull/1877))
- Added support for CreateKey functionality. ([#1853](https://github.com/open-telemetry/opentelemetry-python/pull/1853))

### Changed

- Updated get_tracer to return an empty string when passed an invalid name ([#1854](https://github.com/open-telemetry/opentelemetry-python/pull/1854))
- Changed AttributeValue sequences to warn mypy users on adding None values to array ([#1855](https://github.com/open-telemetry/opentelemetry-python/pull/1855))
- Fixed exporter OTLP header parsing to match baggage header formatting. ([#1869](https://github.com/open-telemetry/opentelemetry-python/pull/1869))
- Added optional `schema_url` field to `Resource` class ([#1871](https://github.com/open-telemetry/opentelemetry-python/pull/1871))
- Update protos to latest version release 0.9.0 ([#1873](https://github.com/open-telemetry/opentelemetry-python/pull/1873))

## Version 1.2.0/0.21b0 (2021-05-11)

### Added

- Added example for running Django with auto instrumentation. ([#1803](https://github.com/open-telemetry/opentelemetry-python/pull/1803))
- Added `B3SingleFormat` and `B3MultiFormat` propagators to the `opentelemetry-propagator-b3` package. ([#1823](https://github.com/open-telemetry/opentelemetry-python/pull/1823))
- Added support for OTEL_SERVICE_NAME. ([#1829](https://github.com/open-telemetry/opentelemetry-python/pull/1829))
- Lazily read/configure limits and allow limits to be unset. ([#1839](https://github.com/open-telemetry/opentelemetry-python/pull/1839))
- Added support for OTEL_EXPORTER_JAEGER_TIMEOUT ([#1863](https://github.com/open-telemetry/opentelemetry-python/pull/1863))

### Changed

- Fixed OTLP gRPC exporter silently failing if scheme is not specified in endpoint. ([#1806](https://github.com/open-telemetry/opentelemetry-python/pull/1806))
- Rename CompositeHTTPPropagator to CompositePropagator as per specification. ([#1807](https://github.com/open-telemetry/opentelemetry-python/pull/1807))
- Propagators use the root context as default for `extract` and do not modify the context if extracting from carrier does not work. ([#1811](https://github.com/open-telemetry/opentelemetry-python/pull/1811))
- Fixed `b3` propagator entrypoint to point to `B3SingleFormat` propagator. ([#1823](https://github.com/open-telemetry/opentelemetry-python/pull/1823))
- Added `b3multi` propagator entrypoint to point to `B3MultiFormat` propagator. ([#1823](https://github.com/open-telemetry/opentelemetry-python/pull/1823))
- Improve warning when failing to decode byte attribute ([#1810](https://github.com/open-telemetry/opentelemetry-python/pull/1810))
- Fixed inconsistency in parent_id formatting from the ConsoleSpanExporter ([#1833](https://github.com/open-telemetry/opentelemetry-python/pull/1833))
- Include span parent in Jaeger gRPC export as `CHILD_OF` reference ([#1809](https://github.com/open-telemetry/opentelemetry-python/pull/1809))
- Fixed sequence values in OTLP exporter not translating ([#1818](https://github.com/open-telemetry/opentelemetry-python/pull/1818))
- Update transient errors retry timeout and retryable status codes ([#1842](https://github.com/open-telemetry/opentelemetry-python/pull/1842))
- Apply validation of attributes to `Resource`, move attribute related logic to separate package.
  ([#1834](https://github.com/open-telemetry/opentelemetry-python/pull/1834))
- Fix start span behavior when excess links and attributes are included ([#1856](https://github.com/open-telemetry/opentelemetry-python/pull/1856))

### Removed

- Moved `opentelemetry-instrumentation` to contrib repository. ([#1797](https://github.com/open-telemetry/opentelemetry-python/pull/1797))

## Version 1.1.0 (2021-04-20)

### Added

- Added `py.typed` file to every package. This should resolve a bunch of mypy errors for users. ([#1720](https://github.com/open-telemetry/opentelemetry-python/pull/1720))
- Add auto generated trace and resource attributes semantic conventions ([#1759](https://github.com/open-telemetry/opentelemetry-python/pull/1759))
- Added `SpanKind` to `should_sample` parameters, suggest using parent span context's tracestate instead of manually passed in tracestate in `should_sample` ([#1764](https://github.com/open-telemetry/opentelemetry-python/pull/1764))
- Added experimental HTTP back propagators. ([#1762](https://github.com/open-telemetry/opentelemetry-python/pull/1762))
- Zipkin exporter: Add support for timeout and implement shutdown ([#1799](https://github.com/open-telemetry/opentelemetry-python/pull/1799))

### Changed

- Adjust `B3Format` propagator to be spec compliant by not modifying context when propagation headers are not present/invalid/empty ([#1728](https://github.com/open-telemetry/opentelemetry-python/pull/1728))
- Silence unnecessary warning when creating a new Status object without description. ([#1721](https://github.com/open-telemetry/opentelemetry-python/pull/1721))
- Update bootstrap cmd to use exact version when installing instrumentation packages. ([#1722](https://github.com/open-telemetry/opentelemetry-python/pull/1722))
- Fix B3 propagator to never return None. ([#1750](https://github.com/open-telemetry/opentelemetry-python/pull/1750))
- Added ProxyTracerProvider and ProxyTracer implementations to allow fetching provider and tracer instances before a global provider is set up. ([#1726](https://github.com/open-telemetry/opentelemetry-python/pull/1726))
- Added `__contains__` to `opentelemetry.trace.span.TraceState`. ([#1773](https://github.com/open-telemetry/opentelemetry-python/pull/1773))
- `opentelemetry-opentracing-shim` Fix an issue in the shim where a Span was being wrapped in a NonRecordingSpan when it wasn't necessary. ([#1776](https://github.com/open-telemetry/opentelemetry-python/pull/1776))
- OTLP Exporter now uses the scheme in the endpoint to determine whether to establish a secure connection or not. ([#1771](https://github.com/open-telemetry/opentelemetry-python/pull/1771))

## Version 1.0.0 (2021-03-26)

### Added

- Document how to work with fork process web server models (Gunicorn, uWSGI, etc.) ([#1609](https://github.com/open-telemetry/opentelemetry-python/pull/1609))
- Add `max_attr_value_length` support to Jaeger exporter ([#1633](https://github.com/open-telemetry/opentelemetry-python/pull/1633))
- Moved `use_span` from Tracer to `opentelemetry.trace.use_span`. ([#1668](https://github.com/open-telemetry/opentelemetry-python/pull/1668))
- `opentelemetry.trace.use_span()` will now overwrite previously set status on span in case an exception is raised inside the context manager and `set_status_on_exception` is set to `True`.
  ([#1668](https://github.com/open-telemetry/opentelemetry-python/pull/1668))
- Add `udp_split_oversized_batches` support to jaeger exporter ([#1500](https://github.com/open-telemetry/opentelemetry-python/pull/1500))

### Changed

- remove `service_name` from constructor of jaeger and opencensus exporters and use of env variable `OTEL_PYTHON_SERVICE_NAME` ([#1669](https://github.com/open-telemetry/opentelemetry-python/pull/1669))
- Rename `IdsGenerator` to `IdGenerator` ([#1651](https://github.com/open-telemetry/opentelemetry-python/pull/1651))
- Make TracerProvider's resource attribute private ([#1652](https://github.com/open-telemetry/opentelemetry-python/pull/1652))
- Rename Resource's `create_empty` to `get_empty` ([#1653](https://github.com/open-telemetry/opentelemetry-python/pull/1653))
- Renamed `BatchExportSpanProcessor` to `BatchSpanProcessor` and `SimpleExportSpanProcessor` to `SimpleSpanProcessor` ([#1656](https://github.com/open-telemetry/opentelemetry-python/pull/1656))
- Rename `DefaultSpan` to `NonRecordingSpan` ([#1661](https://github.com/open-telemetry/opentelemetry-python/pull/1661))
- Fixed distro configuration with `OTEL_TRACES_EXPORTER` env var set to `otlp` ([#1657](https://github.com/open-telemetry/opentelemetry-python/pull/1657))
- Moving `Getter`, `Setter` and `TextMapPropagator` out of `opentelemetry.trace.propagation` and into `opentelemetry.propagators` ([#1662](https://github.com/open-telemetry/opentelemetry-python/pull/1662))
- Rename `BaggagePropagator` to `W3CBaggagePropagator` ([#1663](https://github.com/open-telemetry/opentelemetry-python/pull/1663))
- Rename `JaegerSpanExporter` to `JaegerExporter` and rename `ZipkinSpanExporter` to `ZipkinExporter` ([#1664](https://github.com/open-telemetry/opentelemetry-python/pull/1664))
- Expose `StatusCode` from the `opentelemetry.trace` module ([#1681](https://github.com/open-telemetry/opentelemetry-python/pull/1681))
- Status now only sets `description` when `status_code` is set to `StatusCode.ERROR` ([#1673](https://github.com/open-telemetry/opentelemetry-python/pull/1673))
- Update OTLP exporter to use OTLP proto `0.7.0` ([#1674](https://github.com/open-telemetry/opentelemetry-python/pull/1674))
- Remove time_ns from API and add a warning for older versions of Python ([#1602](https://github.com/open-telemetry/opentelemetry-python/pull/1602))
- Hide implementation classes/variables in api/sdk ([#1684](https://github.com/open-telemetry/opentelemetry-python/pull/1684))
- Cleanup OTLP exporter compression options, add tests ([#1671](https://github.com/open-telemetry/opentelemetry-python/pull/1671))
- Initial documentation for environment variables ([#1680](https://github.com/open-telemetry/opentelemetry-python/pull/1680))
- Change Zipkin exporter to obtain service.name from span ([#1696](https://github.com/open-telemetry/opentelemetry-python/pull/1696))
- Split up `opentelemetry-exporter-jaeger` package into `opentelemetry-exporter-jaeger-proto-grpc` and `opentelemetry-exporter-jaeger-thrift` packages to reduce dependencies for each one. ([#1694](https://github.com/open-telemetry/opentelemetry-python/pull/1694))
- Added `opentelemetry-exporter-otlp-proto-grpc` and changed `opentelemetry-exporter-otlp` to install it as a dependency. This will allow for the next package/protocol to also be in its own package.
  ([#1695](https://github.com/open-telemetry/opentelemetry-python/pull/1695))
- Change Jaeger exporters to obtain service.name from span ([#1703](https://github.com/open-telemetry/opentelemetry-python/pull/1703))
- Fixed an unset `OTEL_TRACES_EXPORTER` resulting in an error ([#1707](https://github.com/open-telemetry/opentelemetry-python/pull/1707))
- Split Zipkin exporter into `opentelemetry-exporter-zipkin-json` and `opentelemetry-exporter-zipkin-proto-http` packages to reduce dependencies. The `opentelemetry-exporter-zipkin` installs both. ([#1699](https://github.com/open-telemetry/opentelemetry-python/pull/1699))
- Make setters and getters optional ([#1690](https://github.com/open-telemetry/opentelemetry-python/pull/1690))

### Removed

- Removed unused `get_hexadecimal_trace_id` and `get_hexadecimal_span_id` methods. ([#1675](https://github.com/open-telemetry/opentelemetry-python/pull/1675))
- Remove `OTEL_EXPORTER_*_INSECURE` env var ([#1682](https://github.com/open-telemetry/opentelemetry-python/pull/1682))
- Removing support for Python 3.5 ([#1706](https://github.com/open-telemetry/opentelemetry-python/pull/1706))

## Version 0.19b0 (2021-03-26)

### Changed

- remove `service_name` from constructor of jaeger and opencensus exporters and use of env variable `OTEL_PYTHON_SERVICE_NAME` ([#1669](https://github.com/open-telemetry/opentelemetry-python/pull/1669))
- Rename `IdsGenerator` to `IdGenerator` ([#1651](https://github.com/open-telemetry/opentelemetry-python/pull/1651))

### Removed

- Removing support for Python 3.5 ([#1706](https://github.com/open-telemetry/opentelemetry-python/pull/1706))

## Version 0.18b0 (2021-02-16)

### Added

- Add urllib to opentelemetry-bootstrap target list ([#1584](https://github.com/open-telemetry/opentelemetry-python/pull/1584))

## Version 1.0.0rc1 (2021-02-12)

### Changed

- Tracer provider environment variables are now consistent with the rest ([#1571](https://github.com/open-telemetry/opentelemetry-python/pull/1571))
- Rename `TRACE_` to `TRACES_` for environment variables ([#1595](https://github.com/open-telemetry/opentelemetry-python/pull/1595))
- Limits for Span attributes, events and links have been updated to 128 ([#1597](https://github.com/open-telemetry/opentelemetry-python/pull/1597))
- Read-only Span attributes have been moved to ReadableSpan class ([#1560](https://github.com/open-telemetry/opentelemetry-python/pull/1560))
- `BatchExportSpanProcessor` flushes export queue when it reaches `max_export_batch_size` ([#1521](https://github.com/open-telemetry/opentelemetry-python/pull/1521))

### Added

- Added `end_on_exit` argument to `start_as_current_span` ([#1519](https://github.com/open-telemetry/opentelemetry-python/pull/1519))
- Add `Span.set_attributes` method to set multiple values with one call ([#1520](https://github.com/open-telemetry/opentelemetry-python/pull/1520))
- Make sure Resources follow semantic conventions ([#1480](https://github.com/open-telemetry/opentelemetry-python/pull/1480))
- Allow missing carrier headers to continue without raising AttributeError ([#1545](https://github.com/open-telemetry/opentelemetry-python/pull/1545))

### Removed

- Remove Configuration ([#1523](https://github.com/open-telemetry/opentelemetry-python/pull/1523))
- Remove Metrics as part of stable, marked as experimental ([#1568](https://github.com/open-telemetry/opentelemetry-python/pull/1568))

## Version 0.17b0 (2021-01-20)

### Added

- Add support for OTLP v0.6.0 ([#1472](https://github.com/open-telemetry/opentelemetry-python/pull/1472))
- Add protobuf via
  gRPC exporting support for Jaeger ([#1471](https://github.com/open-telemetry/opentelemetry-python/pull/1471))
- Add support for Python 3.9 ([#1441](https://github.com/open-telemetry/opentelemetry-python/pull/1441))
- Added the ability to disable instrumenting libraries specified by OTEL_PYTHON_DISABLED_INSTRUMENTATIONS env variable, when using opentelemetry-instrument command. ([#1461](https://github.com/open-telemetry/opentelemetry-python/pull/1461))
- Add `fields` to propagators ([#1374](https://github.com/open-telemetry/opentelemetry-python/pull/1374))
- Add local/remote samplers to parent based sampler ([#1440](https://github.com/open-telemetry/opentelemetry-python/pull/1440))
- Add support for `OTEL_SPAN_{ATTRIBUTE_COUNT_LIMIT,EVENT_COUNT_LIMIT,LINK_COUNT_LIMIT}` ([#1377](https://github.com/open-telemetry/opentelemetry-python/pull/1377))
- Return `None` for `DictGetter` if key not found ([#1449](https://github.com/open-telemetry/opentelemetry-python/pull/1449))
- Added support for Jaeger propagator ([#1219](https://github.com/open-telemetry/opentelemetry-python/pull/1219))
- Remove dependency on SDK from `opentelemetry-instrumentation` package. The `opentelemetry-sdk` package now registers an entrypoint `opentelemetry_configurator` to allow `opentelemetry-instrument` to load the configuration for the SDK ([#1420](https://github.com/open-telemetry/opentelemetry-python/pull/1420))
- `opentelemetry-exporter-zipkin` Add support for array attributes in Span and Resource exports ([#1285](https://github.com/open-telemetry/opentelemetry-python/pull/1285))
- Added `__repr__` for `DefaultSpan`, added `trace_flags` to `__repr__` of `SpanContext` ([#1485](https://github.com/open-telemetry/opentelemetry-python/pull/1485))
- `opentelemetry-sdk` Add support for OTEL_TRACE_SAMPLER and OTEL_TRACE_SAMPLER_ARG env variables ([#1496](https://github.com/open-telemetry/opentelemetry-python/pull/1496))
- Adding `opentelemetry-distro` package to add default configuration for span exporter to OTLP ([#1482](https://github.com/open-telemetry/opentelemetry-python/pull/1482))

### Changed

- `opentelemetry-exporter-zipkin` Updated zipkin exporter status code and error tag ([#1486](https://github.com/open-telemetry/opentelemetry-python/pull/1486))
- Recreate span on every run of a `start_as_current_span`-decorated function ([#1451](https://github.com/open-telemetry/opentelemetry-python/pull/1451))
- `opentelemetry-exporter-otlp` Headers are now passed in as tuple as metadata, instead of a string, which was incorrect.
  ([#1507](https://github.com/open-telemetry/opentelemetry-python/pull/1507))
- `opentelemetry-exporter-jaeger` Updated Jaeger exporter status code tag ([#1488](https://github.com/open-telemetry/opentelemetry-python/pull/1488))
- `opentelemetry-api` `opentelemetry-sdk` Moved `idsgenerator` into sdk ([#1514](https://github.com/open-telemetry/opentelemetry-python/pull/1514))
- `opentelemetry-sdk` The B3Format propagator has been moved into its own package: `opentelemetry-propagator-b3` ([#1513](https://github.com/open-telemetry/opentelemetry-python/pull/1513))
- Update default port for OTLP exporter from 55680 to 4317 ([#1516](https://github.com/open-telemetry/opentelemetry-python/pull/1516))
- `opentelemetry-exporter-zipkin` Update boolean attribute value transformation ([#1509](https://github.com/open-telemetry/opentelemetry-python/pull/1509))
- Move opentelemetry-opentracing-shim out of instrumentation folder ([#1533](https://github.com/open-telemetry/opentelemetry-python/pull/1533))
- `opentelemetry-sdk` The JaegerPropagator has been moved into its own package: `opentelemetry-propagator-jaeger` ([#1525](https://github.com/open-telemetry/opentelemetry-python/pull/1525))
- `opentelemetry-exporter-jaeger`, `opentelemetry-exporter-zipkin` Update InstrumentationInfo tag keys for Jaeger and Zipkin exporters ([#1535](https://github.com/open-telemetry/opentelemetry-python/pull/1535))
- `opentelemetry-sdk` Remove rate property setter from TraceIdRatioBasedSampler ([#1536](https://github.com/open-telemetry/opentelemetry-python/pull/1536))
- Fix TraceState to adhere to specs ([#1502](https://github.com/open-telemetry/opentelemetry-python/pull/1502))
- Update Resource `merge` key conflict precedence ([#1544](https://github.com/open-telemetry/opentelemetry-python/pull/1544))

### Removed

- `opentelemetry-api` Remove ThreadLocalRuntimeContext since Python 3.4 is not supported.

## Version 0.16b1 (2020-11-26)

### Added

- Add meter reference to observers ([#1425](https://github.com/open-telemetry/opentelemetry-python/pull/1425))

## Version 0.16b0 (2020-11-25)

### Added

- Add optional parameter to `record_exception` method ([#1314](https://github.com/open-telemetry/opentelemetry-python/pull/1314))
- Add pickle support to SpanContext class ([#1380](https://github.com/open-telemetry/opentelemetry-python/pull/1380))
- Add instrumentation library name and version to OTLP exported metrics ([#1418](https://github.com/open-telemetry/opentelemetry-python/pull/1418))
- Add Gzip compression for exporter ([#1141](https://github.com/open-telemetry/opentelemetry-python/pull/1141))
- Support for v2 api protobuf format ([#1318](https://github.com/open-telemetry/opentelemetry-python/pull/1318))
- Add IDs Generator as Configurable Property of Auto Instrumentation ([#1404](https://github.com/open-telemetry/opentelemetry-python/pull/1404))
- Added support for `OTEL_EXPORTER` to the `opentelemetry-instrument` command ([#1036](https://github.com/open-telemetry/opentelemetry-python/pull/1036))

### Changed

- Change temporality for Counter and UpDownCounter ([#1384](https://github.com/open-telemetry/opentelemetry-python/pull/1384))
- OTLP exporter: Handle error case when no credentials supplied ([#1366](https://github.com/open-telemetry/opentelemetry-python/pull/1366))
- Update protobuf versions ([#1356](https://github.com/open-telemetry/opentelemetry-python/pull/1356))
- Add missing references to instrumented packages ([#1416](https://github.com/open-telemetry/opentelemetry-python/pull/1416))
- Instrumentation Package depends on the OTel SDK ([#1405](https://github.com/open-telemetry/opentelemetry-python/pull/1405))
- Allow samplers to modify tracestate ([#1319](https://github.com/open-telemetry/opentelemetry-python/pull/1319))
- Update exception handling optional parameters, add escaped attribute to record_exception ([#1365](https://github.com/open-telemetry/opentelemetry-python/pull/1365))
- Rename `MetricRecord` to `ExportRecord` ([#1367](https://github.com/open-telemetry/opentelemetry-python/pull/1367))
- Rename `Record` to `Accumulation` ([#1373](https://github.com/open-telemetry/opentelemetry-python/pull/1373))
- Rename `Meter` to `Accumulator` ([#1372](https://github.com/open-telemetry/opentelemetry-python/pull/1372))
- Fix `ParentBased` sampler for implicit parent spans. Also fix `trace_state` erasure for dropped spans or spans sampled by the `TraceIdRatioBased` sampler. ([#1394](https://github.com/open-telemetry/opentelemetry-python/pull/1394))

## Version 0.15b0 (2020-11-02)

### Added

- Add Env variables in OTLP exporter ([#1101](https://github.com/open-telemetry/opentelemetry-python/pull/1101))
- Add support for Jaeger Span Exporter configuration by environment variables and
  change JaegerSpanExporter constructor parameters ([#1114](https://github.com/open-telemetry/opentelemetry-python/pull/1114))

### Changed

- Updating status codes to adhere to specs ([#1282](https://github.com/open-telemetry/opentelemetry-python/pull/1282))
- Set initial checkpoint timestamp in aggregators ([#1237](https://github.com/open-telemetry/opentelemetry-python/pull/1237))
- Make `SpanProcessor.on_start` accept parent Context ([#1251](https://github.com/open-telemetry/opentelemetry-python/pull/1251))
- Fix b3 propagator entrypoint ([#1265](https://github.com/open-telemetry/opentelemetry-python/pull/1265))
- Allow None in sequence attributes values ([#998](https://github.com/open-telemetry/opentelemetry-python/pull/998))
- Samplers to accept parent Context ([#1267](https://github.com/open-telemetry/opentelemetry-python/pull/1267))
- Span.is_recording() returns false after span has ended ([#1289](https://github.com/open-telemetry/opentelemetry-python/pull/1289))
- Allow samplers to modify tracestate ([#1319](https://github.com/open-telemetry/opentelemetry-python/pull/1319))
- Remove TracerProvider coupling from Tracer init ([#1295](https://github.com/open-telemetry/opentelemetry-python/pull/1295))

## Version 0.14b0 (2020-10-13)

### Added

- Add optional parameter to `record_exception` method ([#1242](https://github.com/open-telemetry/opentelemetry-python/pull/1242))
- Add support for `OTEL_PROPAGATORS` ([#1123](https://github.com/open-telemetry/opentelemetry-python/pull/1123))
- Add keys method to TextMap propagator Getter ([#1196](https://github.com/open-telemetry/opentelemetry-python/issues/1196))
- Add timestamps to OTLP exporter ([#1199](https://github.com/open-telemetry/opentelemetry-python/pull/1199))
- Add Global Error Handler ([#1080](https://github.com/open-telemetry/opentelemetry-python/pull/1080))
- Add support for `OTEL_BSP_MAX_QUEUE_SIZE`, `OTEL_BSP_SCHEDULE_DELAY_MILLIS`, `OTEL_BSP_MAX_EXPORT_BATCH_SIZE` and `OTEL_BSP_EXPORT_TIMEOUT_MILLIS` environment variables ([#1105](https://github.com/open-telemetry/opentelemetry-python/pull/1120))
- Adding Resource to MeterRecord ([#1209](https://github.com/open-telemetry/opentelemetry-python/pull/1209))

### Changed

- Store `int`s as `int`s in the global Configuration object ([#1118](https://github.com/open-telemetry/opentelemetry-python/pull/1118))
- Allow for Custom Trace and Span IDs Generation - `IdsGenerator` for TracerProvider ([#1153](https://github.com/open-telemetry/opentelemetry-python/pull/1153))
- Update baggage propagation header ([#1194](https://github.com/open-telemetry/opentelemetry-python/pull/1194))
- Make instances of SpanContext immutable ([#1134](https://github.com/open-telemetry/opentelemetry-python/pull/1134))
- Parent is now always passed in via Context, instead of Span or SpanContext ([#1146](https://github.com/open-telemetry/opentelemetry-python/pull/1146))
- Update OpenTelemetry protos to v0.5.0 ([#1143](https://github.com/open-telemetry/opentelemetry-python/pull/1143))
- Zipkin exporter now accepts a `max_tag_value_length` attribute to customize the maximum allowed size a tag value can have. ([#1151](https://github.com/open-telemetry/opentelemetry-python/pull/1151))
- Fixed OTLP events to Zipkin annotations translation. ([#1161](https://github.com/open-telemetry/opentelemetry-python/pull/1161))
- Fixed bootstrap command to correctly install opentelemetry-instrumentation-falcon instead of opentelemetry-instrumentation-flask.
  ([#1138](https://github.com/open-telemetry/opentelemetry-python/pull/1138))
- Update sampling result names ([#1128](https://github.com/open-telemetry/opentelemetry-python/pull/1128))
- Event attributes are now immutable ([#1195](https://github.com/open-telemetry/opentelemetry-python/pull/1195))
- Renaming metrics Batcher to Processor ([#1203](https://github.com/open-telemetry/opentelemetry-python/pull/1203))
- Protect access to Span implementation ([#1188](https://github.com/open-telemetry/opentelemetry-python/pull/1188))
- `start_as_current_span` and `use_span` can now optionally auto-record any exceptions raised inside the context manager. ([#1162](https://github.com/open-telemetry/opentelemetry-python/pull/1162))

## Version 0.13b0 (2020-09-17)

### Added

- Add instrumentation info to exported spans ([#1095](https://github.com/open-telemetry/opentelemetry-python/pull/1095))
- Add metric OTLP exporter ([#835](https://github.com/open-telemetry/opentelemetry-python/pull/835))
- Add type hints to OTLP exporter ([#1121](https://github.com/open-telemetry/opentelemetry-python/pull/1121))
- Add support for OTEL_EXPORTER_ZIPKIN_ENDPOINT env var. As part of this change, the configuration of the ZipkinSpanExporter exposes a `url` argument to replace `host_name`, `port`, `protocol`, `endpoint`. This brings this implementation in line with other implementations. ([#1064](https://github.com/open-telemetry/opentelemetry-python/pull/1064))
- Zipkin exporter reports instrumentation info. ([#1097](https://github.com/open-telemetry/opentelemetry-python/pull/1097))
- Add status mapping to tags ([#1111](https://github.com/open-telemetry/opentelemetry-python/issues/1111))
- Report instrumentation info ([#1098](https://github.com/open-telemetry/opentelemetry-python/pull/1098))
- Add support for http metrics ([#1116](https://github.com/open-telemetry/opentelemetry-python/pull/1116))
- Populate resource attributes as per semantic conventions ([#1053](https://github.com/open-telemetry/opentelemetry-python/pull/1053))

### Changed

- Refactor `SpanContext.is_valid` from a method to a data attribute ([#1005](https://github.com/open-telemetry/opentelemetry-python/pull/1005))
- Moved samplers from API to SDK ([#1023](https://github.com/open-telemetry/opentelemetry-python/pull/1023))
- Change return value type of `correlationcontext.get_correlations` to immutable `MappingProxyType` ([#1024](https://github.com/open-telemetry/opentelemetry-python/pull/1024))
- Sampling spec changes ([#1034](https://github.com/open-telemetry/opentelemetry-python/pull/1034))
- Remove lazy Event and Link API from Span interface ([#1045](https://github.com/open-telemetry/opentelemetry-python/pull/1045))
- Rename CorrelationContext to Baggage ([#1060](https://github.com/open-telemetry/opentelemetry-python/pull/1060))
- Rename HTTPTextFormat to TextMapPropagator.
  This change also updates `get_global_httptextformat` and `set_global_httptextformat` to `get_global_textmap` and `set_global_textmap` ([#1085](https://github.com/open-telemetry/opentelemetry-python/pull/1085))
- Fix api/sdk setup.cfg to include missing python files ([#1091](https://github.com/open-telemetry/opentelemetry-python/pull/1091))
- Improve BatchExportSpanProcessor ([#1062](https://github.com/open-telemetry/opentelemetry-python/pull/1062))
- Rename Resource labels to attributes ([#1082](https://github.com/open-telemetry/opentelemetry-python/pull/1082))
- Rename members of `trace.sampling.Decision` enum ([#1115](https://github.com/open-telemetry/opentelemetry-python/pull/1115))
- Merge `OTELResourceDetector` result when creating resources ([#1096](https://github.com/open-telemetry/opentelemetry-python/pull/1096))

### Removed

- Drop support for Python 3.4 ([#1099](https://github.com/open-telemetry/opentelemetry-python/pull/1099))

## Version 0.12b0 (2020-08-14)

### Added

- Implement Views in metrics SDK ([#596](https://github.com/open-telemetry/opentelemetry-python/pull/596))

### Changed

- Update environment variable names, prefix changed from `OPENTELEMETRY` to `OTEL` ([#904](https://github.com/open-telemetry/opentelemetry-python/pull/904))
- Stop TracerProvider and MeterProvider from being overridden ([#959](https://github.com/open-telemetry/opentelemetry-python/pull/959))
- Update default port to 55680 ([#977](https://github.com/open-telemetry/opentelemetry-python/pull/977))
- Add proper length zero padding to hex strings of traceId, spanId, parentId sent on the wire, for compatibility with jaeger-collector ([#908](https://github.com/open-telemetry/opentelemetry-python/pull/908))
- Send start_timestamp and convert labels to strings ([#937](https://github.com/open-telemetry/opentelemetry-python/pull/937))
- Renamed several packages ([#953](https://github.com/open-telemetry/opentelemetry-python/pull/953))
- Thrift URL for Jaeger exporter doesn't allow HTTPS (hardcoded to HTTP) ([#978](https://github.com/open-telemetry/opentelemetry-python/pull/978))
- Change reference names to opentelemetry-instrumentation-opentracing-shim ([#969](https://github.com/open-telemetry/opentelemetry-python/pull/969))
- Changed default Sampler to `ParentOrElse(AlwaysOn)` ([#960](https://github.com/open-telemetry/opentelemetry-python/pull/960))
- Update environment variable `OTEL_RESOURCE` to `OTEL_RESOURCE_ATTRIBUTES` as per the specification

## Version 0.11b0 (2020-07-28)

### Added

- Add support for resources and resource detector ([#853](https://github.com/open-telemetry/opentelemetry-python/pull/853))

### Changed

- Return INVALID_SPAN if no TracerProvider set for get_current_span ([#751](https://github.com/open-telemetry/opentelemetry-python/pull/751))
- Rename record_error to record_exception ([#927](https://github.com/open-telemetry/opentelemetry-python/pull/927))
- Update span exporter to use OpenTelemetry Proto v0.4.0 ([#872](https://github.com/open-telemetry/opentelemetry-python/pull/889))

## Version 0.10b0 (2020-06-23)

### Changed

- Regenerate proto code and add pyi stubs ([#823](https://github.com/open-telemetry/opentelemetry-python/pull/823))
- Rename CounterAggregator -> SumAggregator ([#816](https://github.com/open-telemetry/opentelemetry-python/pull/816))

## Version 0.9b0 (2020-06-10)

### Added

- Adding trace.get_current_span, Removing
  Tracer.get_current_span ([#552](https://github.com/open-telemetry/opentelemetry-python/pull/552))
- Add SumObserver, UpDownSumObserver and LastValueAggregator in metrics ([#789](https://github.com/open-telemetry/opentelemetry-python/pull/789))
- Add start_pipeline to MeterProvider ([#791](https://github.com/open-telemetry/opentelemetry-python/pull/791))
- Initial release of opentelemetry-ext-otlp, opentelemetry-proto

### Changed

- Move stateful & resource from Meter to MeterProvider ([#751](https://github.com/open-telemetry/opentelemetry-python/pull/751))
- Rename Measure to ValueRecorder in metrics ([#761](https://github.com/open-telemetry/opentelemetry-python/pull/761))
- Rename Observer to ValueObserver ([#764](https://github.com/open-telemetry/opentelemetry-python/pull/764))
- Log a warning when replacing the global Tracer/Meter provider ([#856](https://github.com/open-telemetry/opentelemetry-python/pull/856))
- bugfix: byte type attributes are decoded before adding to attributes dict ([#775](https://github.com/open-telemetry/opentelemetry-python/pull/775))
- Rename opentelemetry-auto-instrumentation to opentelemetry-instrumentation, and console script `opentelemetry-auto-instrumentation` to `opentelemetry-instrument`

## Version 0.8b0 (2020-05-27)

### Added

- Add a new bootstrap command that enables automatic instrument installations. ([#650](https://github.com/open-telemetry/opentelemetry-python/pull/650))

### Changed

- Handle boolean, integer and float values in Configuration ([#662](https://github.com/open-telemetry/opentelemetry-python/pull/662))
- bugfix: ensure status is always string ([#640](https://github.com/open-telemetry/opentelemetry-python/pull/640))
- Transform resource to tags when exporting ([#707](https://github.com/open-telemetry/opentelemetry-python/pull/707))
- Rename otcollector to opencensus ([#695](https://github.com/open-telemetry/opentelemetry-python/pull/695))
- Transform resource to tags when exporting ([#645](https://github.com/open-telemetry/opentelemetry-python/pull/645))
- `ext/boto`: Could not serialize attribute aws.region to tag when exporting via jaeger. Serialize tuple type values by coercing them into a string, since Jaeger does not support tuple types.
  ([#865](https://github.com/open-telemetry/opentelemetry-python/pull/865))
- Specify to_json indent from arguments ([#718](https://github.com/open-telemetry/opentelemetry-python/pull/718))
- Span.resource will now default to an empty resource ([#724](https://github.com/open-telemetry/opentelemetry-python/pull/724))
- bugfix: Fix error message ([#729](https://github.com/open-telemetry/opentelemetry-python/pull/729))
- deep copy empty attributes ([#714](https://github.com/open-telemetry/opentelemetry-python/pull/714))

## Version 0.7b1 (2020-05-12)

### Added

- Add reset for the global configuration object, for testing purposes ([#636](https://github.com/open-telemetry/opentelemetry-python/pull/636))
- Add support for programmatic instrumentation ([#579](https://github.com/open-telemetry/opentelemetry-python/pull/579))

### Changed

- tracer.get_tracer now optionally accepts a TracerProvider ([#602](https://github.com/open-telemetry/opentelemetry-python/pull/602))
- Configuration object can now be used by any component of opentelemetry, including 3rd party instrumentations ([#563](https://github.com/open-telemetry/opentelemetry-python/pull/563))
- bugfix: configuration object now matches fields in a case-sensitive manner ([#583](https://github.com/open-telemetry/opentelemetry-python/pull/583))
- bugfix: configuration object now accepts all valid python variable names ([#583](https://github.com/open-telemetry/opentelemetry-python/pull/583))
- bugfix: configuration undefined attributes now return None instead of raising an AttributeError. ([#583](https://github.com/open-telemetry/opentelemetry-python/pull/583))
- bugfix: 'debug' field is now correct ([#549](https://github.com/open-telemetry/opentelemetry-python/pull/549))
- bugfix: enable auto-instrumentation command to work for custom entry points (e.g.
  flask_run) ([#567](https://github.com/open-telemetry/opentelemetry-python/pull/567))
- Exporter API: span parents are now always spancontext ([#548](https://github.com/open-telemetry/opentelemetry-python/pull/548))
- Console span exporter now prints prettier, more legible messages ([#505](https://github.com/open-telemetry/opentelemetry-python/pull/505))
- bugfix: B3 propagation now retrieves parentSpanId correctly ([#621](https://github.com/open-telemetry/opentelemetry-python/pull/621))
- bugfix: a DefaultSpan no longer causes an exception when used with tracer ([#577](https://github.com/open-telemetry/opentelemetry-python/pull/577))
- move last_updated_timestamp into aggregators instead of bound metric instrument ([#522](https://github.com/open-telemetry/opentelemetry-python/pull/522))
- bugfix: suppressing instrumentation in metrics to eliminate an infinite loop of telemetry ([#529](https://github.com/open-telemetry/opentelemetry-python/pull/529))
- bugfix: freezing span attribute sequences, reducing potential user errors ([#529](https://github.com/open-telemetry/opentelemetry-python/pull/529))

## Version 0.6b0 (2020-03-30)

### Added

- Add support for lazy events and links ([#474](https://github.com/open-telemetry/opentelemetry-python/pull/474))
- Adding is_remote flag to SpanContext, indicating when a span is remote ([#516](https://github.com/open-telemetry/opentelemetry-python/pull/516))
- Adding a solution to release metric handles and observers ([#435](https://github.com/open-telemetry/opentelemetry-python/pull/435))
- Initial release: opentelemetry-instrumentation

### Changed

- Metrics API no longer uses LabelSet ([#527](https://github.com/open-telemetry/opentelemetry-python/pull/527))
- Allow digit as first char in vendor specific trace state key ([#511](https://github.com/open-telemetry/opentelemetry-python/pull/511))
- Exporting to collector now works ([#508](https://github.com/open-telemetry/opentelemetry-python/pull/508))

## Version 0.5b0 (2020-03-16)

### Added

- Adding Correlation Context API/SDK and propagator ([#471](https://github.com/open-telemetry/opentelemetry-python/pull/471))
- Adding a global configuration module to simplify setting and getting globals ([#466](https://github.com/open-telemetry/opentelemetry-python/pull/466))
- Adding named meters, removing batchers ([#431](https://github.com/open-telemetry/opentelemetry-python/pull/431))
- Adding attach/detach methods as per spec ([#429](https://github.com/open-telemetry/opentelemetry-python/pull/429))
- Adding OT Collector metrics exporter ([#454](https://github.com/open-telemetry/opentelemetry-python/pull/454))
- Initial release opentelemetry-ext-otcollector

### Changed

- Rename metric handle to bound metric instrument ([#470](https://github.com/open-telemetry/opentelemetry-python/pull/470))
- Moving resources to sdk ([#464](https://github.com/open-telemetry/opentelemetry-python/pull/464))
- Implementing propagators to API to use context ([#446](https://github.com/open-telemetry/opentelemetry-python/pull/446))
- Renaming TraceOptions to TraceFlags ([#450](https://github.com/open-telemetry/opentelemetry-python/pull/450))
- Renaming TracerSource to TracerProvider ([#441](https://github.com/open-telemetry/opentelemetry-python/pull/441))
- Improve validation of attributes ([#460](https://github.com/open-telemetry/opentelemetry-python/pull/460))
- Re-raise errors caught in opentelemetry.sdk.trace.Tracer.use_span() ([#469](https://github.com/open-telemetry/opentelemetry-python/pull/469))
- Implement observer instrument
  ([#425](https://github.com/open-telemetry/opentelemetry-python/pull/425))

## Version 0.4a0 (2020-02-21)

### Added

- Added named Tracers ([#301](https://github.com/open-telemetry/opentelemetry-python/pull/301))
- Add int and valid sequences to AttributeValue type ([#368](https://github.com/open-telemetry/opentelemetry-python/pull/368))
- Add ABC for Metric ([#391](https://github.com/open-telemetry/opentelemetry-python/pull/391))
- Metrics export pipeline, and stdout exporter ([#341](https://github.com/open-telemetry/opentelemetry-python/pull/341))
- Adding Context API Implementation ([#395](https://github.com/open-telemetry/opentelemetry-python/pull/395))
- Adding trace.get_tracer function ([#430](https://github.com/open-telemetry/opentelemetry-python/pull/430))
- Add runtime validation for set_attribute ([#348](https://github.com/open-telemetry/opentelemetry-python/pull/348))
- Add support for B3 ParentSpanID ([#286](https://github.com/open-telemetry/opentelemetry-python/pull/286))
- Implement MinMaxSumCount aggregator ([#422](https://github.com/open-telemetry/opentelemetry-python/pull/422))
- Initial release opentelemetry-ext-zipkin, opentelemetry-ext-prometheus

### Changed

- Separate Default classes from interface descriptions ([#311](https://github.com/open-telemetry/opentelemetry-python/pull/311))
- Export span status ([#367](https://github.com/open-telemetry/opentelemetry-python/pull/367))
- Export span kind ([#387](https://github.com/open-telemetry/opentelemetry-python/pull/387))
- Set status for ended spans ([#297](https://github.com/open-telemetry/opentelemetry-python/pull/297) and [#358](https://github.com/open-telemetry/opentelemetry-python/pull/358))
- Use module loggers ([#351](https://github.com/open-telemetry/opentelemetry-python/pull/351))
- Protect start_time and end_time from being set manually by the user ([#363](https://github.com/open-telemetry/opentelemetry-python/pull/363))
- Set status in start_as_current_span ([#377](https://github.com/open-telemetry/opentelemetry-python/pull/377))
- Implement force_flush for span processors ([#389](https://github.com/open-telemetry/opentelemetry-python/pull/389))
- Set sampled flag on sampling trace ([#407](https://github.com/open-telemetry/opentelemetry-python/pull/407))
- Add io and formatter options to console exporter ([#412](https://github.com/open-telemetry/opentelemetry-python/pull/412))
- Clean up ProbabilitySample for 64 bit trace IDs ([#238](https://github.com/open-telemetry/opentelemetry-python/pull/238))

### Removed

- Remove monotonic and absolute metric instruments ([#410](https://github.com/open-telemetry/opentelemetry-python/pull/410))

## Version 0.3a0 (2019-12-11)

### Added

- Add metrics exporters ([#192](https://github.com/open-telemetry/opentelemetry-python/pull/192))
- Implement extract and inject support for HTTP_HEADERS and TEXT_MAP formats ([#256](https://github.com/open-telemetry/opentelemetry-python/pull/256))

### Changed

- Multiple tracing API/SDK changes
- Multiple metrics API/SDK changes

### Removed

- Remove option to create unstarted spans from API ([#290](https://github.com/open-telemetry/opentelemetry-python/pull/290))

## Version 0.2a0 (2019-10-29)

### Added

- W3C TraceContext fixes and compliance tests ([#228](https://github.com/open-telemetry/opentelemetry-python/pull/228))
- Sampler API/SDK ([#225](https://github.com/open-telemetry/opentelemetry-python/pull/225))
- Initial release: opentelemetry-ext-jaeger, opentelemetry-opentracing-shim

### Changed

- Multiple metrics API/SDK changes
- Multiple tracing API/SDK changes
- Multiple context API changes
- Multiple bugfixes and improvements

## Version 0.1a0 (2019-09-30)

### Added

- Initial release api/sdk
- Use Attribute rather than boundattribute in logrecord ([#3567](https://github.com/open-telemetry/opentelemetry-python/pull/3567))
- Fix flush error when no LoggerProvider configured for LoggingHandler ([#3608](https://github.com/open-telemetry/opentelemetry-python/pull/3608))
- Fix `OTLPMetricExporter` ignores `preferred_aggregation` property ([#3603](https://github.com/open-telemetry/opentelemetry-python/pull/3603))
- Logs: set `observed_timestamp` field ([#3565](https://github.com/open-telemetry/opentelemetry-python/pull/3565))
- Add missing Resource SchemaURL in OTLP exporters ([#3652](https://github.com/open-telemetry/opentelemetry-python/pull/3652))
- Fix loglevel warning text ([#3566](https://github.com/open-telemetry/opentelemetry-python/pull/3566))
- Prometheus Exporter string representation for target_info labels ([#3659](https://github.com/open-telemetry/opentelemetry-python/pull/3659))
- Logs: ObservedTimestamp field is missing in console exporter output ([#3564](https://github.com/open-telemetry/opentelemetry-python/pull/3564))
- Fix explicit bucket histogram aggregation ([#3429](https://github.com/open-telemetry/opentelemetry-python/pull/3429))
- Add `code.lineno`, `code.function` and `code.filepath` to all logs ([#3645](https://github.com/open-telemetry/opentelemetry-python/pull/3645))
- Add Synchronous Gauge instrument ([#3462](https://github.com/open-telemetry/opentelemetry-python/pull/3462))
- Drop support for 3.7 ([#3668](https://github.com/open-telemetry/opentelemetry-python/pull/3668))
- Include key in attribute sequence warning ([#3639](https://github.com/open-telemetry/opentelemetry-python/pull/3639))
- Upgrade markupsafe, Flask and related dependencies to dev and test environments ([#3609](https://github.com/open-telemetry/opentelemetry-python/pull/3609))
- Handle HTTP 2XX responses as successful in OTLP exporters ([#3623](https://github.com/open-telemetry/opentelemetry-python/pull/3623))
- Improve Resource Detector timeout messaging ([#3645](https://github.com/open-telemetry/opentelemetry-python/pull/3645))
- Add Proxy classes for logging ([#3575](https://github.com/open-telemetry/opentelemetry-python/pull/3575))
- Remove dependency on 'backoff' library ([#3679](https://github.com/open-telemetry/opentelemetry-python/pull/3679))
- Make create_gauge non-abstract method ([#3817](https://github.com/open-telemetry/opentelemetry-python/pull/3817))
- Make `tracer.start_as_current_span()` decorator work with async functions ([#3633](https://github.com/open-telemetry/opentelemetry-python/pull/3633))
- Fix python 3.12 deprecation warning ([#3751](https://github.com/open-telemetry/opentelemetry-python/pull/3751))
- bump mypy to 0.982 ([#3776](https://github.com/open-telemetry/opentelemetry-python/pull/3776))
- Add support for OTEL_SDK_DISABLED environment variable ([#3648](https://github.com/open-telemetry/opentelemetry-python/pull/3648))
- Fix ValueError message for PeriodicExportingMetricsReader ([#3769](https://github.com/open-telemetry/opentelemetry-python/pull/3769))
- Use `BaseException` instead of `Exception` in `record_exception` ([#3354](https://github.com/open-telemetry/opentelemetry-python/pull/3354))
- Make span.record_exception more robust ([#3778](https://github.com/open-telemetry/opentelemetry-python/pull/3778))
- Fix license field in pyproject.toml files
([#3803](https://github.com/open-telemetry/opentelemetry-python/pull/3803)) python-opentelemetry-1.39.1/CONTRIBUTING.md000066400000000000000000000361521511654350100202600ustar00rootroot00000000000000# Contributing to opentelemetry-python The Python special interest group (SIG) meets weekly on Thursdays at 9AM PST. Check the [OpenTelemetry community calendar](https://groups.google.com/a/opentelemetry.io/g/calendar-python) for specific dates and Zoom meeting links. See the [public meeting notes](https://docs.google.com/document/d/1CIMGoIOZ-c3-igzbd6_Pnxx1SjAkjwqoYSUWxPY8XIs/edit) for a summary description of past meetings. See the [community membership document](https://github.com/open-telemetry/community/blob/main/community-membership.md) on how to become a [**Member**](https://github.com/open-telemetry/community/blob/main/community-membership.md#member), [**Approver**](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver) and [**Maintainer**](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer). Before you can contribute, you will need to sign the [Contributor License Agreement](https://docs.linuxfoundation.org/lfx/easycla/contributors). Please also read the [OpenTelemetry Contributor Guide](https://github.com/open-telemetry/community/blob/main/guides/contributor/README.md). # Find the right repo This is the main repo for OpenTelemetry Python. Nevertheless, there are other repos related to this project. Please take a look at this list first; your contribution may belong in one of these repos: 1. [OpenTelemetry Contrib](https://github.com/open-telemetry/opentelemetry-python-contrib): Instrumentations for third-party libraries and frameworks. ## Find a Buddy and get Started Quickly! If you are looking for someone to help you find a starting point and be a resource for your first contribution, join our Slack and find a buddy! 1. Join [Slack](https://slack.cncf.io/) and join our [channel](https://cloud-native.slack.com/archives/C01PD4HUVBL). 2. Post in the room with an introduction to yourself, what area you are interested in (check issues marked "Help Wanted"), and say you are looking for a buddy. We will match you with someone who has experience in that area. The Slack channel will be used for introductions and as an entry point for external people to be triaged and redirected. For discussions, please open up an issue or a GitHub [Discussion](https://github.com/open-telemetry/opentelemetry-python/discussions). Your OpenTelemetry buddy is your resource to talk to directly on all aspects of contributing to OpenTelemetry: providing context, reviewing PRs, and helping them get merged. Buddies will not be available 24/7, but are committed to responding during their normal contribution hours. ## Development This project uses [tox](https://tox.readthedocs.io) to automate some aspects of development, including testing against multiple Python versions. To install `tox`, run: ```console pip install tox ``` You can also run tox with `uv` support.
By default [tox.ini](./tox.ini) will automatically create a provisioned tox environment with `tox-uv`, but you can install it at the host level: ```sh pip install tox-uv ``` You can run `tox` with the following arguments: - `tox` to run all existing tox commands, including unit tests for all packages under multiple Python versions - `tox -e docs` to regenerate the API docs - `tox -e opentelemetry-api` and `tox -e opentelemetry-sdk` to run the API and SDK unit tests - `tox -e py313-opentelemetry-api` to run the API unit tests under a specific Python version, e.g. Python 3.13 - `tox -e spellcheck` to run a spellcheck on all the code - `tox -e lint-some-package` to run lint checks on `some-package` - `tox -e generate-workflows` to regenerate CI workflows if tox environments have been updated - `tox -e ruff` to run ruff linter and formatter checks against the entire codebase - `tox -e typecheck` to run pyright against the entire codebase - `tox -e public-symbols-check` to run `public_symbols_checker.py` - `tox -e docker-tests-{otlpexporter,opencensus}` to run the docker-based tests for either or both packages - `tox -e tracecontext` to run integration tests for tracecontext - `tox -e precommit` to run all `pre-commit` actions `ruff check` and `ruff format` are executed when `tox -e ruff` is run. We strongly recommend configuring [pre-commit](https://pre-commit.com/) locally to run `ruff` and `rstcheck` automatically before each commit by installing it as a git hook. You just need to [install pre-commit](https://pre-commit.com/#install) in your environment: ```console pip install pre-commit -c dev-requirements.txt ``` and run this command inside the git repository: ```console pre-commit install ``` ### Virtual Environment You can also create a single virtual environment to make it easier to run local tests. For that, you'll need to install [`uv`](https://docs.astral.sh/uv/getting-started/installation/). After installing `uv`, you can run the following command: ```sh uv sync ``` This will create a virtual environment in the `.venv` directory and install all the necessary dependencies. ### Public Symbols We try to keep the number of _public symbols_ in our code minimal. A public symbol is any Python identifier that does not start with an underscore. Every public symbol is something that has to be kept in order to maintain backwards compatibility, so we try to have as few as possible. To check if your PR is adding public symbols, run `tox -e public-symbols-check`. This will always fail if public symbols are being added/removed. The idea behind this is that every PR that adds/removes public symbols fails in CI, forcing reviewers to check the symbols to make sure they are strictly necessary. If, after checking them, they are considered necessary, the PR will be labeled with `Approve Public API check` so that this check is not run. Also, we try to keep our console output as clean as possible. Most of the time this means catching expected log messages in the test cases: ```python from logging import WARNING ... def test_case(self): with self.assertLogs(level=WARNING): some_function_that_will_log_a_warning_message() ``` Other options are to disable logging propagation or to disable a logger altogether, as in the sketch below.
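For example, a minimal sketch of those two options — the logger name `some.module` is illustrative, not a real OpenTelemetry logger:

```python
import logging
import unittest


class SomeTestCase(unittest.TestCase):
    def test_case(self):
        # Hypothetical logger of the module whose messages we want to
        # keep out of the console output.
        logger = logging.getLogger("some.module")
        # Option 1: stop records from reaching the root logger's handlers.
        logger.propagate = False
        # Option 2: disable the logger altogether.
        logger.disabled = True
        try:
            ...  # call the code that would emit the log messages here
        finally:
            # Restore the logger so other tests see the default behavior.
            logger.propagate = True
            logger.disabled = False
```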
A similar approach can be followed to catch warnings: ```python def test_case(self): with self.assertWarns(DeprecationWarning): some_function_that_will_raise_a_deprecation_warning() ``` See [`tox.ini`](https://github.com/open-telemetry/opentelemetry-python/blob/main/tox.ini) for more detail on available tox commands. ### Contrib repo Some of the `tox` targets install packages from the [OpenTelemetry Python Contrib Repository](https://github.com/open-telemetry/opentelemetry-python-contrib.git) via pip. The version of the packages installed defaults to the `main` branch in that repository when `tox` is run locally. It is possible to install packages tagged with a specific git commit hash by setting an environment variable before running tox as per the following example: ``` CONTRIB_REPO_SHA=dde62cebffe519c35875af6d06fae053b3be65ec tox ``` The continuous integration overrides that environment variable as per the configuration [here](https://github.com/open-telemetry/opentelemetry-python/blob/main/.github/workflows/test_0.yml#L14). ### Benchmarks Some packages have benchmark tests. To run them, run `tox -f benchmark`. Benchmark tests use `pytest-benchmark` and output a table with results to the console. To write benchmarks, simply use the [pytest benchmark fixture](https://pytest-benchmark.readthedocs.io/en/latest/usage.html#usage) like the following: ```python def test_simple_start_span(benchmark): def benchmark_start_as_current_span(span_name, attribute_num): span = tracer.start_span( span_name, attributes={"count": attribute_num}, ) span.end() benchmark(benchmark_start_as_current_span, "benchmarkedSpan", 42) ``` Make sure the test file is under the `benchmarks/` folder of the package it is benchmarking and that its path mirrors the path of the module being benchmarked. Make sure the file name begins with `test_benchmark_` (e.g. `opentelemetry-sdk/benchmarks/trace/propagation/test_benchmark_b3_format.py`). ## Pull Requests ### How to Send Pull Requests Everyone is welcome to contribute code to `opentelemetry-python` via GitHub pull requests (PRs). To create a new PR, fork the project in GitHub and clone the upstream repo: ```console git clone https://github.com/open-telemetry/opentelemetry-python.git cd opentelemetry-python ``` Add your fork as an origin: ```console git remote add fork https://github.com/YOUR_GITHUB_USERNAME/opentelemetry-python.git ``` Make sure you have all supported versions of Python installed, then install `tox` (only needed the first time): ```sh pip install tox tox-uv ``` Run tests in the root of the repository (this will run all tox environments and may take some time): ```sh tox ``` Check out a new branch, make modifications and push the branch to your fork: ```sh git checkout -b feature ``` After you edit the files, stage changes in the current directory: ```sh git add . ``` Then run the following to commit the changes: ```sh git commit git push fork feature ``` Open a pull request against the main `opentelemetry-python` repo. Pull requests are also tested for their compatibility with packages distributed by OpenTelemetry in the [OpenTelemetry Python Contrib Repository](https://github.com/open-telemetry/opentelemetry-python-contrib.git). If a pull request (PR) introduces a change that would break the compatibility of these packages with the Core packages in this repo, a separate PR should be opened in the Contrib repo with changes to make the packages compatible. Follow these steps (a sketch of the workflow edit follows the list): 1. Open Core repo PR (Contrib Tests will fail) 2. Open Contrib repo PR and modify its `CORE_REPO_SHA` in `.github/workflows/test_x.yml` to equal the commit SHA of the Core repo PR to pass tests 3. Modify the Core repo PR `CONTRIB_REPO_SHA` in `.github/workflows/test_x.yml` to equal the commit SHA of the Contrib repo PR to pass Contrib repo tests (a sanity check for the Maintainers & Approvers) 4. Merge the Contrib repo 5. Restore the Core repo PR `CONTRIB_REPO_SHA` to point to `main` 6. Merge the Core repo PR
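As an illustration, the pinning in steps 2 and 3 amounts to editing an `env` entry in the workflow file. The fragment below is a sketch only: the surrounding layout of `test_x.yml` may differ, and the SHA is hypothetical — only the variable names come from this guide:

```yaml
# .github/workflows/test_x.yml (illustrative fragment)
env:
  # Temporarily pin the other repo to the PR's commit instead of `main`:
  CORE_REPO_SHA: 0123abc0123abc0123abc0123abc0123abc01234  # hypothetical SHA
```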
### How to Receive Comments * If the PR is not ready for review, please put `[WIP]` in the title, tag it as `work-in-progress`, or mark it as [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). * Make sure CLA is signed and CI is clear. ### How to Get PRs Merged A PR is considered to be **ready to merge** when: * It has received two approvals from [Approvers](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver) / [Maintainers](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer) (at different companies). * Major feedback is resolved. * All tests are passing, including Contrib repo tests, which may require updating the GitHub workflow to reference a PR in the Contrib repo. * It has been open for review for at least one working day. This gives people reasonable time to review. * A trivial change (typo, cosmetic, doc, etc.) doesn't have to wait one day. * An urgent fix can be an exception as long as it has been actively communicated. #### Allow edits from maintainers Something _very important_ is to allow edits from maintainers when opening a PR. This will allow maintainers to rebase your PR against `main`, which is necessary in order to merge your PR. You could do it yourself too, but keep in mind that every time another PR gets merged, your PR will require rebasing. Since only maintainers can merge your PR, it is almost impossible for them to catch it at the exact moment you have rebased it so that it can be merged. Allowing maintainers to edit your PR also allows them to help you get your PR merged by making minor fixes for issues that, while unrelated to your PR, can still block it. #### Fork from a personal GitHub account Right now GitHub [does not allow](https://github.com/orgs/community/discussions/5634) PRs to be edited by maintainers if the corresponding repo fork exists in a GitHub organization. Please fork this repo in a personal GitHub account instead. One of the maintainers will merge the PR once it is **ready to merge**. ## Design Choices As with other OpenTelemetry clients, opentelemetry-python follows the [opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). It's especially valuable to read through the [library guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md). ### Focus on Capabilities, Not Structure Compliance OpenTelemetry is an evolving specification, one where the desires and use cases are clear, but the methods to satisfy those use cases are not. As such, contributions should provide functionality and behavior that conforms to the specification, but the interface and structure are flexible. It is preferable to have contributions follow the idioms of the language rather than conform to specific API names or argument patterns in the spec. For a deeper discussion, see: https://github.com/open-telemetry/opentelemetry-specification/issues/165 ### Environment Variables If you are adding a component that introduces new OpenTelemetry environment variables, put them all in a module, as is done in `opentelemetry.environment_variables` or in `opentelemetry.sdk.environment_variables`. Keep in mind that any new environment variable must be declared in all caps and must start with `OTEL_PYTHON_`. Register this module with the `opentelemetry_environment_variables` entry point to make your environment variables automatically load as options for the `opentelemetry-instrument` command. A sketch of such a module and its registration follows.
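As an illustration only — the package name `my_package` and the variable below are hypothetical, not part of this repo:

```python
# my_package/environment_variables.py (hypothetical module)
OTEL_PYTHON_MY_FEATURE = "OTEL_PYTHON_MY_FEATURE"
"""
.. envvar:: OTEL_PYTHON_MY_FEATURE

Hypothetical variable that would toggle a feature of my_package.
"""
```

And the entry point registration in that package's `pyproject.toml` — the entry point group name comes from this guide; the rest is a sketch:

```toml
[project.entry-points.opentelemetry_environment_variables]
my_package = "my_package.environment_variables"
```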
## Style Guide * docstrings should adhere to the [Google Python Style Guide](http://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) as specified with the [napoleon extension](http://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html#google-vs-numpy) in [Sphinx](http://www.sphinx-doc.org/en/master/index.html). ## Updating supported Python versions ### Bumping the Python baseline When updating the minimum supported Python version remember to: - Remove the version in `pyproject.toml` trove classifiers - Remove the version from `tox.ini` - Update GitHub workflows accordingly with `tox -e generate-workflows` - Search for `sys.version_info` usage and remove code for unsupported versions - Bump `py-version` in `.pylintrc` for Python version dependent checks ### Adding support for a new Python release When adding support for a new Python release remember to: - Add the version in `tox.ini` - Add the version in `pyproject.toml` trove classifiers - Update GitHub workflows accordingly with `tox -e generate-workflows`; lint and benchmarks use the latest supported version - Update `.pre-commit-config.yaml` - Update tox examples in the documentation ## Contributions that involve new packages As part of an effort to mitigate namespace squatting on PyPI, please check whether a package name has already been taken on PyPI before contributing a new package. Contact a maintainer, bring the issue up in the weekly Python SIG, or create a ticket with PyPI if a desired name has already been taken. python-opentelemetry-1.39.1/LICENSE000066400000000000000000000261351511654350100170330ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
python-opentelemetry-1.39.1/README.md000066400000000000000000000172341511654350100173060ustar00rootroot00000000000000# OpenTelemetry Python [![Slack](https://img.shields.io/badge/slack-@cncf/otel/python-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01PD4HUVBL) [![Build Status 0](https://github.com/open-telemetry/opentelemetry-python/actions/workflows/test_0.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-python/actions/workflows/test_0.yml) [![Minimum Python Version](https://img.shields.io/badge/python-3.9+-blue.svg)](https://www.python.org/downloads/) [![Release](https://img.shields.io/github/v/release/open-telemetry/opentelemetry-python?include_prereleases&style=)](https://github.com/open-telemetry/opentelemetry-python/releases/) [![Read the Docs](https://readthedocs.org/projects/opentelemetry-python/badge/?version=latest)](https://opentelemetry-python.readthedocs.io/en/latest/) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/11060/badge)](https://www.bestpractices.dev/projects/11060) ## Project Status See the [OpenTelemetry Instrumentation for Python](https://opentelemetry.io/docs/instrumentation/python/#status-and-releases). | Signal | Status | Project | | ------- | ------------ | ------- | | Traces | Stable | N/A | | Metrics | Stable | N/A | | Logs | Development* | N/A | Project versioning information and stability guarantees can be found [here](./rationale.md#versioning-and-releasing). ***Breaking Changes** > [!IMPORTANT] > We are working on stabilizing the Log signal, which will require deprecations and breaking changes. We will try to reduce the number of releases that require an update to your code, especially for instrumentations and for SDK developers. ## Getting started You can find the getting started guide for OpenTelemetry Python [here](https://opentelemetry.io/docs/instrumentation/python/getting-started/). If you are looking for **examples** of how to use the OpenTelemetry API to instrument your code manually, or how to set up the OpenTelemetry Python SDK, see https://opentelemetry.io/docs/instrumentation/python/manual/. ## Python Version Support This project ensures compatibility with the currently supported versions of Python. As new Python versions are released, support for them is added, and as old Python versions reach their end of life, support for them is removed. We add support for new Python versions no later than 3 months after they become stable. We remove support for old Python versions 6 months after they reach their [end of life](https://devguide.python.org/devcycle/#end-of-life-branches). ## Documentation The online documentation is available at https://opentelemetry-python.readthedocs.io/. To access the latest version of the documentation, see https://opentelemetry-python.readthedocs.io/en/latest/. ## Install This repository includes multiple installable packages. The `opentelemetry-api` package includes abstract classes and no-op implementations that comprise the OpenTelemetry API following the [OpenTelemetry specification](https://github.com/open-telemetry/opentelemetry-specification). The `opentelemetry-sdk` package is the reference implementation of the API. Libraries that produce telemetry data should only depend on `opentelemetry-api`, and defer the choice of the SDK to the application developer. Applications may depend on `opentelemetry-sdk` or another package that implements the API.
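As a sketch of what that split looks like in code — the library and scope names here are illustrative, while the SDK wiring mirrors the examples shipped in this repo:

```python
# library code: depends only on opentelemetry-api
from opentelemetry import trace

tracer = trace.get_tracer("my.library")  # hypothetical instrumentation scope


def do_work():
    # The API returns a no-op (or proxy) tracer until an SDK is configured,
    # so the library itself never has to import opentelemetry-sdk.
    with tracer.start_as_current_span("do-work"):
        ...  # real work here


# application code: chooses the SDK and wires it up
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
    BatchSpanProcessor,
    ConsoleSpanExporter,
)

trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
    BatchSpanProcessor(ConsoleSpanExporter())
)
do_work()
```

The API and SDK packages are available on the Python Package Index (PyPI).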
You can install them via `pip` with the following commands: ```sh pip install opentelemetry-api pip install opentelemetry-sdk ``` The [`exporter/`](https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter) directory includes OpenTelemetry exporter packages. You can install the packages separately with the following command: ```sh pip install opentelemetry-exporter-{exporter} ``` The [`propagator/`](https://github.com/open-telemetry/opentelemetry-python/tree/main/propagator) directory includes OpenTelemetry propagator packages. You can install the packages separately with the following command: ```sh pip install opentelemetry-propagator-{propagator} ``` To install the development versions of these packages instead, clone or fork this repository and perform an [editable install](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs): ```sh pip install -e ./opentelemetry-api -e ./opentelemetry-sdk -e ./opentelemetry-semantic-conventions ``` For additional exporter and instrumentation packages, see the [`opentelemetry-python-contrib`](https://github.com/open-telemetry/opentelemetry-python-contrib) repository. ## Contributing For information about contributing to OpenTelemetry Python, see [CONTRIBUTING.md](CONTRIBUTING.md). We meet weekly on Thursdays at 9AM PST. The meeting is subject to change depending on contributors' availability. Check the [OpenTelemetry community calendar](https://calendar.google.com/calendar/embed?src=c_2bf73e3b6b530da4babd444e72b76a6ad893a5c3f43cf40467abc7a9a897f977%40group.calendar.google.com) for specific dates and Zoom meeting links. Meeting notes are available as a public [Google doc](https://docs.google.com/document/d/1CIMGoIOZ-c3-igzbd6_Pnxx1SjAkjwqoYSUWxPY8XIs/edit). ### Maintainers - [Aaron Abbott](https://github.com/aabmass), Google - [Leighton Chen](https://github.com/lzchen), Microsoft - [Riccardo Magliocchetti](https://github.com/xrmx), Elastic For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer). ### Approvers - [Dylan Russell](https://github.com/dylanrussell), Google - [Emídio Neto](https://github.com/emdneto), PicPay - [Jeremy Voss](https://github.com/jeremydvoss), Microsoft - [Liudmila Molkova](https://github.com/lmolkova), Grafana Labs - [Owais Lone](https://github.com/owais), Splunk - [Pablo Collins](https://github.com/pmcollins), Splunk - [Shalev Roda](https://github.com/shalevr), Cisco - [Srikanth Chekuri](https://github.com/srikanthccv), signoz.io - [Tammy Baylis](https://github.com/tammy-baylis-swi), SolarWinds For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver). ### Emeritus Maintainers - [Alex Boten](https://github.com/codeboten) - [Chris Kleinknecht](https://github.com/c24t) - [Diego Hurtado](https://github.com/ocelotl) - [Owais Lone](https://github.com/owais) - [Reiley Yang](https://github.com/reyang) - [Srikanth Chekuri](https://github.com/srikanthccv) - [Yusuke Tsutsumi](https://github.com/toumorokoshi) For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager). 
### Emeritus Approvers - [Ashutosh Goel](https://github.com/ashu658) - [Carlos Alberto Cortez](https://github.com/carlosalberto) - [Christian Neumüller](https://github.com/Oberon00) - [Héctor Hernández](https://github.com/hectorhdzg) - [Mauricio Vásquez](https://github.com/mauriciovasquezbernal) - [Nathaniel Ruiz Nowell](https://github.com/NathanielRN) - [Nikolay Sokolik](https://github.com/oxeye-nikolay) - [Sanket Mehta](https://github.com/sanketmehta28) - [Tahir H. Butt](https://github.com/majorgreys) For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager). ### Thanks to all of our contributors!
Repo contributors python-opentelemetry-1.39.1/RELEASING.md000066400000000000000000000156741511654350100176710ustar00rootroot00000000000000# Release instructions ## Preparing a new major or minor release * Run the [Prepare release branch workflow](https://github.com/open-telemetry/opentelemetry-python/actions/workflows/prepare-release-branch.yml). * Press the "Run workflow" button, and leave the default branch `main` selected. * If making a pre-release of stable components (e.g. release candidate), enter the pre-release version number, e.g. `1.9.0rc2`. (otherwise the workflow will pick up the version from `main` and just remove the `.dev` suffix). * Review the two pull requests that it creates. (one is targeted to the release branch and one is targeted to `main`). * The builds will fail for the release PR because of validation rules. Follow the [release workflow](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/RELEASING.md) for the contrib repo up until this same point. * Close and reopen the PR so that the workflow will take into account the label automation we have in place * Release builds now should pass. * Merge the release PR. * Merge the PR to main (this can be done separately from [making the release](#making-the-release)) ## Preparing a new patch release * Backport pull request(s) to the release branch. * Run the [Backport workflow](https://github.com/open-telemetry/opentelemetry-python/actions/workflows/backport.yml). * Press the "Run workflow" button, then select the release branch from the dropdown list, e.g. `release/v1.9.x`, then enter the pull request number that you want to backport, then click the "Run workflow" button below that. * Add the label `backport` to the generated pull request. * In case label automation doesn't work, just close and reopen the PR so that the workflow will take into account the label automation we have in place. * Review and merge the backport pull request that it generates. * Merge a pull request to the release branch updating the `CHANGELOG.md`. * The heading for the unreleased entries should be `## Unreleased`. * Run the [Prepare patch release workflow](https://github.com/open-telemetry/opentelemetry-python/actions/workflows/prepare-patch-release.yml). * Press the "Run workflow" button, then select the release branch from the dropdown list, e.g. `release/v1.9.x`, and click the "Run workflow" button below that. * Review and merge the pull request that it creates for updating the version. * Note: If you are doing a patch release in `-core` repo, you should also do an equivalent patch release in `-contrib` repo (even if there's no fix to release), otherwise tests in CI will fail. ## Making the release * Run the [Release workflow](https://github.com/open-telemetry/opentelemetry-python/actions/workflows/release.yml). * Press the "Run workflow" button, then select the release branch from the dropdown list, e.g. `release/v1.9.x`, and click the "Run workflow" button below that. * This workflow will publish the artifacts and publish a GitHub release with release notes based on the change log. * Verify that a new [Github release](https://github.com/open-telemetry/opentelemetry-python/releases) has been created and that the CHANGELOGs look correct. ## After the release * Check PyPI * This should be handled automatically on release by the [publish action](https://github.com/open-telemetry/opentelemetry-python/blob/main/.github/workflows/release.yml). 
* Check the [action logs](https://github.com/open-telemetry/opentelemetry-python/actions?query=workflow%3APublish) to make sure packages have been uploaded to PyPI * Check the release history (e.g. https://pypi.org/project/opentelemetry-api/#history) on PyPI * If for some reason the action failed, see [Publish failed](#publish-failed) below * Move the stable tag and kick off a documentation build * Run the following (TODO automate): ```bash git tag -d stable git tag stable git push --delete origin stable git push origin tag stable ``` * ReadTheDocs will not automatically rebuild on tag changes, so manually kick off a build of stable: https://readthedocs.org/projects/opentelemetry-python/builds/. ![ReadTheDocs build instructions](.github/rtd-build-instructions.png) * This will ensure that ReadTheDocs for core is pointing at the stable release. ## Notes about version numbering for stable components * The version number for stable components in the `main` branch is always `X.Y.0.dev`, where `X.Y.0` represents the next minor release. * When the release branch is created, you can opt to make a "pre-release", e.g. `X.Y.0rc2`. * If you ARE NOT making a "pre-release": * A "long-term" release branch will be created, e.g. `release/v1.9.x-0.21bx` (notice the wildcard x's). Later on, after the initial release, you can backport PRs to a "long-term" release branch and make patch releases from it. * The version number for stable components in the release branch will be bumped to remove the `.dev`, e.g. `X.Y.0`. * The version number for stable components in the `main` branch will be bumped to the next version, e.g. `X.{Y+1}.0.dev`. * If you ARE making a "pre-release": * A "short-term" release branch will be created, e.g. `release/v1.9.0rc2-0.21b0` (notice the precise version with no wildcard x's). "Short-term" release branches do not support backports or patch releases after the initial release. * The version number for stable components in the `main` branch will not be bumped, e.g. it will remain `X.Y.0.dev` since the next minor release will still be `X.Y.0`. ## Notes about version numbering for unstable components * The version number for unstable components in the `main` branch is always `0.Yb0.dev`, where `0.Yb0` represents the next minor release. * _Question: Is "b" (beta) redundant on "0." releases, or is this a python thing? I'm wondering if we can change it to `0.Y.0` to match up with the practice in js and go repos._ * Unstable components do not need "pre-releases", and so whether or not you are making a "pre-release" of stable components: * The version number for unstable components in the release branch will be bumped to remove the `.dev`, e.g. `0.Yb0`. * The version number for unstable components in the `main` branch will be bumped to the next version, e.g. `0.{Y+1}b0.dev`. ## Releasing dev version of new packages to claim namespace When a contribution introduces a new package, in order to mitigate name-squatting incidents, release the current development version of the new package under the `opentelemetry` user to simply claim the namespace. This should be done shortly after the PR that introduced this package has been merged into `main`. ## Troubleshooting ### Publish failed If for some reason the action failed, do it manually: - Switch to the release branch (important so we don't publish packages with "dev" versions) - Build distributions with `./scripts/build.sh` - Delete distributions we don't want to push (e.g.
`testutil`) - Push to PyPI as `twine upload --skip-existing --verbose dist/*` - Double check PyPI! python-opentelemetry-1.39.1/dev-requirements.txt000066400000000000000000000007151511654350100220640ustar00rootroot00000000000000pylint==3.3.4 httpretty==1.1.4 pyright==1.1.405 sphinx==7.1.2 sphinx-rtd-theme==2.0.0rc4 sphinx-autodoc-typehints==1.25.2 pytest==7.4.4 pytest-cov==4.1.0 readme-renderer==42.0 markupsafe==2.1.3 bleach==4.1.0 # This dependency was updated to a breaking version. codespell==2.1.0 requests==2.32.3 ruamel.yaml==0.17.21 asgiref==3.7.2 psutil==5.9.6 GitPython==3.1.41 pre-commit==3.7.0; python_version >= '3.9' pre-commit==3.5.0; python_version < '3.9' ruff==0.14.1 python-opentelemetry-1.39.1/docs-requirements.txt000066400000000000000000000014321511654350100222330ustar00rootroot00000000000000sphinx==7.1.2 sphinx-rtd-theme==2.0.0rc4 sphinx-autodoc-typehints==1.25.2 # used to generate docs for the website sphinx-jekyll-builder==0.3.0 # Need to install the api/sdk in the venv for autodoc. Modifying sys.path # doesn't work for pkg_resources. ./opentelemetry-api ./opentelemetry-semantic-conventions ./opentelemetry-sdk ./opentelemetry-proto ./shim/opentelemetry-opencensus-shim ./shim/opentelemetry-opentracing-shim ./exporter/opentelemetry-exporter-otlp-proto-common ./exporter/opentelemetry-exporter-otlp-proto-http ./exporter/opentelemetry-exporter-otlp-proto-grpc # Required by instrumentation and exporter packages grpcio~=1.27 Deprecated~=1.2 django~=4.2 flask~=2.3 opentracing~=2.2.0 thrift~=0.10 wrapt>=1.0.0,<2.0.0 markupsafe~=2.0 protobuf==5.29.5 prometheus-client~=0.22.1 python-opentelemetry-1.39.1/docs/000077500000000000000000000000001511654350100167515ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/Makefile000066400000000000000000000011051511654350100204060ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) python-opentelemetry-1.39.1/docs/api/000077500000000000000000000000001511654350100175225ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/api/_logs.rst000066400000000000000000000002701511654350100213560ustar00rootroot00000000000000opentelemetry._logs package ============================= Submodules ---------- .. toctree:: _logs.severity Module contents --------------- .. automodule:: opentelemetry._logs python-opentelemetry-1.39.1/docs/api/_logs.severity.rst000066400000000000000000000001471511654350100232320ustar00rootroot00000000000000opentelemetry._logs.severity ============================ .. automodule:: opentelemetry._logs.severitypython-opentelemetry-1.39.1/docs/api/baggage.propagation.rst000066400000000000000000000002631511654350100241540ustar00rootroot00000000000000opentelemetry.baggage.propagation package ==================================================== Module contents --------------- .. 
automodule:: opentelemetry.baggage.propagation python-opentelemetry-1.39.1/docs/api/baggage.rst000066400000000000000000000003161511654350100216310ustar00rootroot00000000000000opentelemetry.baggage package ======================================== Subpackages ----------- .. toctree:: baggage.propagation Module contents --------------- .. automodule:: opentelemetry.baggage python-opentelemetry-1.39.1/docs/api/context.context.rst000066400000000000000000000002761511654350100234300ustar00rootroot00000000000000opentelemetry.context.base\_context module ========================================== .. automodule:: opentelemetry.context.context :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/api/context.rst000066400000000000000000000002751511654350100217440ustar00rootroot00000000000000opentelemetry.context package ============================= Submodules ---------- .. toctree:: context.context Module contents --------------- .. automodule:: opentelemetry.context python-opentelemetry-1.39.1/docs/api/environment_variables.rst000066400000000000000000000002561511654350100246530ustar00rootroot00000000000000opentelemetry.environment_variables package =========================================== Module contents --------------- .. automodule:: opentelemetry.environment_variables python-opentelemetry-1.39.1/docs/api/index.rst000066400000000000000000000003341511654350100213630ustar00rootroot00000000000000OpenTelemetry Python API ======================== .. TODO: what is the API .. toctree:: :maxdepth: 1 _logs baggage context propagate propagators trace metrics environment_variables python-opentelemetry-1.39.1/docs/api/metrics.rst000066400000000000000000000002231511654350100217170ustar00rootroot00000000000000opentelemetry.metrics package ============================= .. toctree:: Module contents --------------- .. automodule:: opentelemetry.metrics python-opentelemetry-1.39.1/docs/api/propagate.rst000066400000000000000000000002231511654350100222330ustar00rootroot00000000000000opentelemetry.propagate package ======================================== Module contents --------------- .. automodule:: opentelemetry.propagate python-opentelemetry-1.39.1/docs/api/propagators.composite.rst000066400000000000000000000002571511654350100246220ustar00rootroot00000000000000opentelemetry.propagators.composite ==================================================== Module contents --------------- .. automodule:: opentelemetry.propagators.composite python-opentelemetry-1.39.1/docs/api/propagators.rst000066400000000000000000000002431511654350100226140ustar00rootroot00000000000000opentelemetry.propagators package ======================================== Subpackages ----------- .. toctree:: propagators.textmap propagators.composite python-opentelemetry-1.39.1/docs/api/propagators.textmap.rst000066400000000000000000000002531511654350100242760ustar00rootroot00000000000000opentelemetry.propagators.textmap ==================================================== Module contents --------------- .. automodule:: opentelemetry.propagators.textmap python-opentelemetry-1.39.1/docs/api/trace.rst000066400000000000000000000003011511654350100213440ustar00rootroot00000000000000opentelemetry.trace package =========================== Submodules ---------- .. toctree:: trace.status trace.span Module contents --------------- .. 
automodule:: opentelemetry.tracepython-opentelemetry-1.39.1/docs/api/trace.span.rst000066400000000000000000000002251511654350100223110ustar00rootroot00000000000000opentelemetry.trace.span ======================== .. automodule:: opentelemetry.trace.span :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/api/trace.status.rst000066400000000000000000000002331511654350100226720ustar00rootroot00000000000000opentelemetry.trace.status ========================== .. automodule:: opentelemetry.trace.status :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/conf.py000066400000000000000000000174331511654350100202600ustar00rootroot00000000000000# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys from os import listdir from os.path import isdir, join # configure django to avoid the following exception: # django.core.exceptions.ImproperlyConfigured: Requested settings, but settings # are not configured. You must either define the environment variable # DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings. from django.conf import settings settings.configure() source_dirs = [ os.path.abspath("../opentelemetry-instrumentation/src/"), ] exp = "../exporter" exp_dirs = [ os.path.abspath("/".join(["../exporter", f, "src"])) for f in listdir(exp) if isdir(join(exp, f)) ] shim = "../shim" shim_dirs = [ os.path.abspath("/".join(["../shim", f, "src"])) for f in listdir(shim) if isdir(join(shim, f)) ] sys.path[:0] = source_dirs + exp_dirs + shim_dirs # -- Project information ----------------------------------------------------- project = "OpenTelemetry Python" copyright = "OpenTelemetry Authors" # pylint: disable=redefined-builtin author = "OpenTelemetry Authors" # -- General configuration --------------------------------------------------- # Easy automatic cross-references for `code in backticks` default_role = "any" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ # API doc generation "sphinx.ext.autodoc", # Support for google-style docstrings "sphinx.ext.napoleon", # Infer types from hints instead of docstrings "sphinx_autodoc_typehints", # Add links to source from generated docs "sphinx.ext.viewcode", # Link to other sphinx docs "sphinx.ext.intersphinx", # Add a .nojekyll file to the generated HTML docs # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing "sphinx.ext.githubpages", # Support external links to different versions in the Github repo "sphinx.ext.extlinks", ] intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), "opentracing": ( "https://opentracing-python.readthedocs.io/en/latest/", None, ), "aiohttp": ("https://aiohttp.readthedocs.io/en/stable/", None), "wrapt": ("https://wrapt.readthedocs.io/en/latest/", None), "pymongo": ("https://pymongo.readthedocs.io/en/stable/", None), "grpc": ("https://grpc.github.io/grpc/python/", None), } # http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky # Sphinx will warn about all references where the target cannot be found. nitpicky = True # Sphinx does not recognize generic type TypeVars # Container supposedly were fixed, but does not work # https://github.com/sphinx-doc/sphinx/pull/3744 nitpick_ignore = [ ("py:class", "ValueT"), ("py:class", "CarrierT"), ("py:obj", "opentelemetry.propagators.textmap.CarrierT"), ("py:obj", "Union"), ("py:data", "typing.Union"), ( "py:class", "opentelemetry.sdk.metrics._internal.instrument._Synchronous", ), ( "py:class", "opentelemetry.sdk.metrics._internal.instrument._Asynchronous", ), # Even if wrapt is added to intersphinx_mapping, sphinx keeps failing # with "class reference target not found: ObjectProxy". ("py:class", "ObjectProxy"), ( "py:class", "opentelemetry.trace._LinkBase", ), ( "py:class", "opentelemetry.proto.collector.trace.v1.trace_service_pb2_grpc.TraceServiceStub", ), ( "py:class", "opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc.MetricsServiceStub", ), ( "py:class", "opentelemetry.proto.collector.logs.v1.logs_service_pb2_grpc.LogsServiceStub", ), ( "py:class", "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin", ), ( "py:class", "opentelemetry.proto.collector.trace.v1.trace_service_pb2.ExportTraceServiceRequest", ), ( "py:class", "opentelemetry.exporter.otlp.proto.common._internal.metrics_encoder.OTLPMetricExporterMixin", ), ("py:class", "opentelemetry.proto.resource.v1.resource_pb2.Resource"), ( "py:class", "opentelemetry.proto.collector.metrics.v1.metrics_service_pb2.ExportMetricsServiceRequest", ), ("py:class", "opentelemetry.sdk._logs._internal.export.LogRecordExporter"), ( "py:class", "opentelemetry.sdk._logs._internal.export.LogRecordExportResult", ), ( "py:class", "opentelemetry.proto.collector.logs.v1.logs_service_pb2.ExportLogsServiceRequest", ), ( "py:class", "opentelemetry.sdk.metrics._internal.exemplar.exemplar_reservoir.FixedSizeExemplarReservoirABC", ), ( "py:class", "opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar", ), ( "py:class", "opentelemetry.sdk.metrics._internal.aggregation._Aggregation", ), ( "py:class", "_contextvars.Token", ), ( "py:class", "AnyValue", ), ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. 
exclude_patterns = [ "_build", "Thumbs.db", ".DS_Store", "examples/fork-process-model/flask-gunicorn", "examples/fork-process-model/flask-uwsgi", "examples/error_handler/error_handler_0", "examples/error_handler/error_handler_1", ] _exclude_members = ["_abc_impl"] autodoc_default_options = { "members": True, "undoc-members": True, "show-inheritance": True, "member-order": "bysource", "exclude-members": ",".join(_exclude_members), } # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # Support external links to specific versions of the files in the Github repo branch = os.environ.get("READTHEDOCS_VERSION") if branch is None or branch == "latest": branch = "main" REPO = "open-telemetry/opentelemetry-python/" scm_raw_web = "https://raw.githubusercontent.com/" + REPO + branch scm_web = "https://github.com/" + REPO + "blob/" + branch # Store variables in the epilogue so they are globally available. rst_epilog = """ .. |SCM_WEB| replace:: {s} .. |SCM_RAW_WEB| replace:: {sr} .. |SCM_BRANCH| replace:: {b} """.format(s=scm_web, sr=scm_raw_web, b=branch) # used to have links to repo files extlinks = { "scm_raw_web": (scm_raw_web + "/%s", "scm_raw_web"), "scm_web": (scm_web + "/%s", "scm_web"), } def on_missing_reference(app, env, node, contnode): # FIXME Remove when opentelemetry.metrics._Gauge is renamed to # opentelemetry.metrics.Gauge if node["reftarget"] == "opentelemetry.metrics.Gauge": return contnode def setup(app): app.connect("missing-reference", on_missing_reference) python-opentelemetry-1.39.1/docs/examples/000077500000000000000000000000001511654350100205675ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/auto-instrumentation/000077500000000000000000000000001511654350100250005ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/auto-instrumentation/README.rst000066400000000000000000000004131511654350100264650ustar00rootroot00000000000000Auto-instrumentation ==================== To learn about automatic instrumentation and how to run the example in this directory, see `Automatic Instrumentation`_. .. _Automatic Instrumentation: https://opentelemetry.io/docs/instrumentation/python/automatic/example python-opentelemetry-1.39.1/docs/examples/auto-instrumentation/client.py000066400000000000000000000027751511654350100266430ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import sys from requests import get from opentelemetry import trace from opentelemetry.propagate import inject from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ConsoleSpanExporter, ) trace.set_tracer_provider(TracerProvider()) tracer = trace.get_tracer_provider().get_tracer(__name__) trace.get_tracer_provider().add_span_processor( BatchSpanProcessor(ConsoleSpanExporter()) ) # Get parameter from command line argument or use default value "testing" param_value = sys.argv[1] if len(sys.argv) > 1 else "testing" with tracer.start_as_current_span("client"): with tracer.start_as_current_span("client-server"): headers = {} inject(headers) requested = get( "http://localhost:8082/server_request", params={"param": param_value}, headers=headers, ) assert requested.status_code == 200 python-opentelemetry-1.39.1/docs/examples/auto-instrumentation/server_automatic.py000066400000000000000000000014441511654350100307310ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from flask import Flask, request app = Flask(__name__) @app.route("/server_request") def server_request(): print(request.args.get("param")) return "served" if __name__ == "__main__": app.run(port=8082) python-opentelemetry-1.39.1/docs/examples/auto-instrumentation/server_manual.py000066400000000000000000000030401511654350100302120ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from flask import Flask, request from opentelemetry.instrumentation.wsgi import collect_request_attributes from opentelemetry.propagate import extract from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ConsoleSpanExporter, ) from opentelemetry.trace import ( SpanKind, get_tracer_provider, set_tracer_provider, ) app = Flask(__name__) set_tracer_provider(TracerProvider()) tracer = get_tracer_provider().get_tracer(__name__) get_tracer_provider().add_span_processor( BatchSpanProcessor(ConsoleSpanExporter()) ) @app.route("/server_request") def server_request(): with tracer.start_as_current_span( "server_request", context=extract(request.headers), kind=SpanKind.SERVER, attributes=collect_request_attributes(request.environ), ): print(request.args.get("param")) return "served" if __name__ == "__main__": app.run(port=8082) python-opentelemetry-1.39.1/docs/examples/auto-instrumentation/server_programmatic.py000066400000000000000000000025161511654350100314310ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from flask import Flask, request from opentelemetry.instrumentation.flask import FlaskInstrumentor from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ConsoleSpanExporter, ) from opentelemetry.trace import get_tracer_provider, set_tracer_provider set_tracer_provider(TracerProvider()) get_tracer_provider().add_span_processor( BatchSpanProcessor(ConsoleSpanExporter()) ) instrumentor = FlaskInstrumentor() app = Flask(__name__) instrumentor.instrument_app(app) # instrumentor.instrument_app(app, excluded_urls="/server_request") @app.route("/server_request") def server_request(): print(request.args.get("param")) return "served" if __name__ == "__main__": app.run(port=8082) python-opentelemetry-1.39.1/docs/examples/basic_context/000077500000000000000000000000001511654350100234145ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/basic_context/README.rst000066400000000000000000000015141511654350100251040ustar00rootroot00000000000000Basic Context ============= These examples show how context is propagated through Spans in OpenTelemetry. There are three different examples: * implicit_context: Shows how starting a span implicitly creates context. * child_context: Shows how context is propagated through child spans. * async_context: Shows how context can be shared in another coroutine. The source files of these examples are available :scm_web:`here `. Installation ------------ .. code-block:: sh pip install opentelemetry-api pip install opentelemetry-sdk Run the Example --------------- .. code-block:: sh python .py The output will be shown in the console. Useful links ------------ - OpenTelemetry_ - :doc:`../../api/trace` .. 
_OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ python-opentelemetry-1.39.1/docs/examples/basic_context/async_context.py000066400000000000000000000021141511654350100266450ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio from opentelemetry import baggage, trace from opentelemetry.sdk.trace import TracerProvider trace.set_tracer_provider(TracerProvider()) tracer = trace.get_tracer(__name__) loop = asyncio.get_event_loop() async def async_span(span): with trace.use_span(span): ctx = baggage.set_baggage("foo", "bar") return ctx async def main(): span = tracer.start_span(name="span") ctx = await async_span(span) print(baggage.get_all(context=ctx)) loop.run_until_complete(main()) python-opentelemetry-1.39.1/docs/examples/basic_context/child_context.py000066400000000000000000000022061511654350100266150ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry import baggage, trace tracer = trace.get_tracer(__name__) global_ctx = baggage.set_baggage("context", "global") with tracer.start_as_current_span(name="root span") as root_span: parent_ctx = baggage.set_baggage("context", "parent") with tracer.start_as_current_span( name="child span", context=parent_ctx ) as child_span: child_ctx = baggage.set_baggage("context", "child") print(baggage.get_baggage("context", global_ctx)) print(baggage.get_baggage("context", parent_ctx)) print(baggage.get_baggage("context", child_ctx)) python-opentelemetry-1.39.1/docs/examples/basic_context/implicit_context.py000066400000000000000000000017201511654350100273440ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
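# Note that baggage.set_baggage() does not modify the implicit (global)
# context; it returns a new Context. That is why the global baggage printed
# below is empty while the returned context carries the "foo" entry.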
from opentelemetry import baggage, trace from opentelemetry.sdk.trace import TracerProvider trace.set_tracer_provider(TracerProvider()) tracer = trace.get_tracer(__name__) with tracer.start_span(name="root span") as root_span: ctx = baggage.set_baggage("foo", "bar") print(f"Global context baggage: {baggage.get_all()}") print(f"Span context baggage: {baggage.get_all(context=ctx)}") python-opentelemetry-1.39.1/docs/examples/basic_tracer/000077500000000000000000000000001511654350100232105ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/basic_tracer/README.rst000066400000000000000000000014241511654350100247000ustar00rootroot00000000000000Basic Trace =========== These examples show how to use OpenTelemetry to create and export Spans. There are two different examples: * basic_trace: Shows how to configure a SpanProcessor and Exporter, and how to create a tracer and span. * resources: Shows how to add resource information to a Provider. The source files of these examples are available :scm_web:`here `. Installation ------------ .. code-block:: sh pip install opentelemetry-api pip install opentelemetry-sdk Run the Example --------------- .. code-block:: sh python .py The output will be shown in the console. Useful links ------------ - OpenTelemetry_ - :doc:`../../api/trace` .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ python-opentelemetry-1.39.1/docs/examples/basic_tracer/basic_trace.py000066400000000000000000000017611511654350100260260ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ConsoleSpanExporter, ) trace.set_tracer_provider(TracerProvider()) trace.get_tracer_provider().add_span_processor( BatchSpanProcessor(ConsoleSpanExporter()) ) tracer = trace.get_tracer(__name__) with tracer.start_as_current_span("foo"): print("Hello world!") python-opentelemetry-1.39.1/docs/examples/basic_tracer/resources.py000066400000000000000000000022531511654350100255760ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
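# Demonstrates attaching a Resource (here, a service.name) to the
# TracerProvider so that every span it produces is exported with that
# resource information.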
from opentelemetry import trace from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ConsoleSpanExporter, ) # Use Resource.create() instead of constructor directly resource = Resource.create({"service.name": "basic_service"}) trace.set_tracer_provider(TracerProvider(resource=resource)) trace.get_tracer_provider().add_span_processor( BatchSpanProcessor(ConsoleSpanExporter()) ) tracer = trace.get_tracer(__name__) with tracer.start_as_current_span("foo"): print("Hello world!") python-opentelemetry-1.39.1/docs/examples/django/000077500000000000000000000000001511654350100220315ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/django/README.rst000066400000000000000000000105131511654350100235200ustar00rootroot00000000000000Django Instrumentation ====================== This shows how to use ``opentelemetry-instrumentation-django`` to automatically instrument a Django app. The source files of these examples are available :scm_web:`here `. Preparation ----------- This example will be executed in a separate virtual environment: .. code-block:: $ mkdir django_auto_instrumentation $ virtualenv django_auto_instrumentation $ source django_auto_instrumentation/bin/activate Installation ------------ .. code-block:: $ pip install opentelemetry-sdk $ pip install opentelemetry-instrumentation-django $ pip install requests Execution --------- Execution of the Django app ........................... This example uses Django features intended for development environment. The ``runserver`` option should not be used for production environments. Set these environment variables first: #. ``export DJANGO_SETTINGS_MODULE=instrumentation_example.settings`` The way to achieve OpenTelemetry instrumentation for your Django app is to use an ``opentelemetry.instrumentation.django.DjangoInstrumentor`` to instrument the app. Clone the ``opentelemetry-python`` repository and go to ``opentelemetry-python/docs/examples/django``. Once there, open the ``manage.py`` file. The call to ``DjangoInstrumentor().instrument()`` in ``main`` is all that is needed to make the app be instrumented. Run the Django app with ``python manage.py runserver --noreload``. The ``--noreload`` flag is needed to avoid Django from running ``main`` twice. Execution of the client ....................... Open up a new console and activate the previous virtual environment there too: ``source django_auto_instrumentation/bin/activate`` Go to ``opentelemetry-python/docs/examples/django``, once there run the client with: ``python client.py hello`` Go to the previous console, where the Django app is running. You should see output similar to this one: .. code-block:: { "name": "home_page_view", "context": { "trace_id": "0xed88755c56d95d05a506f5f70e7849b9", "span_id": "0x0a94c7a60e0650d5", "trace_state": "{}" }, "kind": "SpanKind.SERVER", "parent_id": "0x3096ef92e621c22d", "start_time": "2020-04-26T01:49:57.205833Z", "end_time": "2020-04-26T01:49:57.206214Z", "status": { "status_code": "OK" }, "attributes": { "http.request.method": "GET", "server.address": "localhost", "url.scheme": "http", "server.port": 8000, "url.full": "http://localhost:8000/?param=hello", "server.socket.address": "127.0.0.1", "network.protocol.version": "1.1", "http.response.status_code": 200 }, "events": [], "links": [] } The last output shows spans automatically generated by the OpenTelemetry Django Instrumentation package. 
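For reference, this is all the instrumentation-specific code in ``manage.py``; the sketch below omits the rest of the file for brevity:

.. code-block:: python

    from opentelemetry.instrumentation.django import DjangoInstrumentor

    def main():
        # This call is what makes the Django application be instrumented
        DjangoInstrumentor().instrument()
        # ... the usual execute_from_command_line(sys.argv) follows here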
Disabling Django Instrumentation -------------------------------- Django's instrumentation can be disabled by setting the following environment variable: ``export OTEL_PYTHON_DJANGO_INSTRUMENT=False`` Auto Instrumentation -------------------- This same example can be run using auto instrumentation. Comment out the call to ``DjangoInstrumentor().instrument()`` in ``main``, then Run the django app with ``opentelemetry-instrument python manage.py runserver --noreload``. Repeat the steps with the client, the result should be the same. Usage with Auto Instrumentation and uWSGI ----------------------------------------- uWSGI and Django can be used together with auto instrumentation. To do so, first install uWSGI in the previous virtual environment: ``pip install uwsgi`` Once that is done, run the server with ``uwsgi`` from the directory that contains ``instrumentation_example``: ``opentelemetry-instrument uwsgi --http :8000 --module instrumentation_example.wsgi`` This should start one uWSGI worker in your console. Open up a browser and point it to ``localhost:8000``. This request should display a span exported in the server console. References ---------- * `Django `_ * `OpenTelemetry Project `_ * `OpenTelemetry Django extension `_ python-opentelemetry-1.39.1/docs/examples/django/client.py000066400000000000000000000025541511654350100236670ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from sys import argv from requests import get from opentelemetry import trace from opentelemetry.propagate import inject from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ConsoleSpanExporter, ) trace.set_tracer_provider(TracerProvider()) tracer = trace.get_tracer_provider().get_tracer(__name__) trace.get_tracer_provider().add_span_processor( BatchSpanProcessor(ConsoleSpanExporter()) ) with tracer.start_as_current_span("client"): with tracer.start_as_current_span("client-server"): headers = {} inject(headers) requested = get( "http://localhost:8000", params={"param": argv[1]}, headers=headers, ) assert requested.status_code == 200 python-opentelemetry-1.39.1/docs/examples/django/instrumentation_example/000077500000000000000000000000001511654350100270075ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/django/instrumentation_example/__init__.py000066400000000000000000000000001511654350100311060ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/django/instrumentation_example/asgi.py000066400000000000000000000017651511654350100303150ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ASGI config for instrumentation_example project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault( "DJANGO_SETTINGS_MODULE", "instrumentation_example.settings" ) application = get_asgi_application() python-opentelemetry-1.39.1/docs/examples/django/instrumentation_example/settings.py000066400000000000000000000072131511654350100312240ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Django settings for instrumentation_example project. Generated by "django-admin startproject" using Django 3.0.4. For more information on this file, see https://docs.djangoproject.com/en/3.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.0/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = "it%*!=l2(fcawu=!m-06n&#j(iq2j#%$fu6)myi*b9i5ojk+6+" # SECURITY WARNING: don"t run with debug turned on in production! 
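# (DEBUG stays enabled here because this settings module only backs the
# development example run with runserver, as noted in the README.)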
DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.messages", "django.contrib.staticfiles", ] MIDDLEWARE = [ "django.middleware.security.SecurityMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ] ROOT_URLCONF = "instrumentation_example.urls" TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", ], }, }, ] WSGI_APPLICATION = "instrumentation_example.wsgi.application" # Database # https://docs.djangoproject.com/en/3.0/ref/settings/#databases DATABASES = { "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": os.path.join(BASE_DIR, "db.sqlite3"), } } # Password validation # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", }, { "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", }, { "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", }, { "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", }, ] # Internationalization # https://docs.djangoproject.com/en/3.0/topics/i18n/ LANGUAGE_CODE = "en-us" TIME_ZONE = "UTC" USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.0/howto/static-files/ STATIC_URL = "/static/" python-opentelemetry-1.39.1/docs/examples/django/instrumentation_example/urls.py000066400000000000000000000025641511654350100303550ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """instrumentation_example URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path("", views.home, name="home") Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path("", Home.as_view(), name="home") Including another URLconf 1. Import the include() function: from django.urls import include, path 2. 
Add a URL to urlpatterns: path("blog/", include("blog.urls")) """ from django.contrib import admin from django.urls import include, path urlpatterns = [ path("admin/", admin.site.urls), path("", include("pages.urls")), ] python-opentelemetry-1.39.1/docs/examples/django/instrumentation_example/wsgi.py000066400000000000000000000017651511654350100303430ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ WSGI config for instrumentation_example project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault( "DJANGO_SETTINGS_MODULE", "instrumentation_example.settings" ) application = get_wsgi_application() python-opentelemetry-1.39.1/docs/examples/django/manage.py000077500000000000000000000026671511654350100236510ustar00rootroot00000000000000#!/usr/bin/env python # Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Django"s command-line utility for administrative tasks.""" import os import sys from opentelemetry.instrumentation.django import DjangoInstrumentor def main(): os.environ.setdefault( "DJANGO_SETTINGS_MODULE", "instrumentation_example.settings" ) # This call is what makes the Django application be instrumented DjangoInstrumentor().instrument() try: from django.core.management import ( # noqa: PLC0415 execute_from_command_line, ) except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == "__main__": main() python-opentelemetry-1.39.1/docs/examples/django/pages/000077500000000000000000000000001511654350100231305ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/django/pages/__init__.py000066400000000000000000000011671511654350100252460ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. default_app_config = "pages.apps.PagesConfig" python-opentelemetry-1.39.1/docs/examples/django/pages/apps.py000066400000000000000000000012351511654350100244460ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.apps import AppConfig class PagesConfig(AppConfig): name = "pages" python-opentelemetry-1.39.1/docs/examples/django/pages/migrations/000077500000000000000000000000001511654350100253045ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/django/pages/migrations/__init__.py000066400000000000000000000000001511654350100274030ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/django/pages/urls.py000066400000000000000000000012771511654350100244760ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.urls import path from .views import home_page_view urlpatterns = [path("", home_page_view, name="home")] python-opentelemetry-1.39.1/docs/examples/django/pages/views.py000066400000000000000000000017661511654350100246510ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
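# The view itself creates no spans; request spans come from the Django
# instrumentation enabled in manage.py. The provider and console exporter
# configured below only determine where those spans are printed.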
from django.http import HttpResponse from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ConsoleSpanExporter, ) trace.set_tracer_provider(TracerProvider()) trace.get_tracer_provider().add_span_processor( BatchSpanProcessor(ConsoleSpanExporter()) ) def home_page_view(request): return HttpResponse("Hello, world") python-opentelemetry-1.39.1/docs/examples/error_handler/000077500000000000000000000000001511654350100234155ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/error_handler/README.rst000066400000000000000000000106761511654350100251160ustar00rootroot00000000000000Global Error Handler ==================== Overview -------- This example shows how to use the global error handler. The source files of these examples are available :scm_web:`here `. Preparation ----------- This example will be executed in a separate virtual environment: .. code:: sh $ mkdir global_error_handler $ virtualenv global_error_handler $ source global_error_handler/bin/activate Installation ------------ Here we install first ``opentelemetry-sdk``, the only dependency. Afterwards, 2 error handlers are installed: ``error_handler_0`` will handle ``ZeroDivisionError`` exceptions, ``error_handler_1`` will handle ``IndexError`` and ``KeyError`` exceptions. .. code:: sh $ pip install opentelemetry-sdk $ git clone https://github.com/open-telemetry/opentelemetry-python.git $ pip install -e opentelemetry-python/docs/examples/error_handler/error_handler_0 $ pip install -e opentelemetry-python/docs/examples/error_handler/error_handler_1 Execution --------- An example is provided in the ``opentelemetry-python/docs/examples/error_handler/example.py``. You can just run it, you should get output similar to this one: .. code:: pytb ErrorHandler0 handling a ZeroDivisionError Traceback (most recent call last): File "test.py", line 5, in 1 / 0 ZeroDivisionError: division by zero ErrorHandler1 handling an IndexError Traceback (most recent call last): File "test.py", line 11, in [1][2] IndexError: list index out of range ErrorHandler1 handling a KeyError Traceback (most recent call last): File "test.py", line 17, in {1: 2}[2] KeyError: 2 Error handled by default error handler: Traceback (most recent call last): File "test.py", line 23, in assert False AssertionError No error raised The ``opentelemetry-sdk.error_handler`` module includes documentation that explains how this works. We recommend you read it also, here is just a small summary. In ``example.py`` we use ``GlobalErrorHandler`` as a context manager in several places, for example: .. code:: python with GlobalErrorHandler(): {1: 2}[2] Running that code will raise a ``KeyError`` exception. ``GlobalErrorHandler`` will "capture" that exception and pass it down to the registered error handlers. If there is one that handles ``KeyError`` exceptions then it will handle it. That can be seen in the result of the execution of ``example.py``: .. code:: ErrorHandler1 handling a KeyError Traceback (most recent call last): File "test.py", line 17, in {1: 2}[2] KeyError: 2 There is no registered error handler that can handle ``AssertionError`` exceptions so this kind of errors are handled by the default error handler which just logs the exception to standard logging, as seen here: .. 
code:: Error handled by default error handler: Traceback (most recent call last): File "test.py", line 23, in assert False AssertionError When no exception is raised, the code inside the scope of ``GlobalErrorHandler`` is executed normally: .. code:: No error raised Users can create Python packages that provide their own custom error handlers and install them in their virtual environments before running their code which instantiates ``GlobalErrorHandler`` context managers. ``error_handler_0`` and ``error_handler_1`` can be used as examples to create these custom error handlers. In order for the error handlers to be registered, they need to create a class that inherits from ``opentelemetry.sdk.error_handler.ErrorHandler`` and at least one ``Exception``-type class. For example, this is an error handler that handles ``ZeroDivisionError`` exceptions: .. code:: python from opentelemetry.sdk.error_handler import ErrorHandler from logging import getLogger logger = getLogger(__name__) class ErrorHandler0(ErrorHandler, ZeroDivisionError): def handle(self, error: Exception, *args, **kwargs): logger.exception("ErrorHandler0 handling a ZeroDivisionError") To register this error handler, use the ``opentelemetry_error_handler`` entry point in the setup of the error handler package: .. code:: [options.entry_points] opentelemetry_error_handler = error_handler_0 = error_handler_0:ErrorHandler0 This entry point should point to the error handler class, ``ErrorHandler0`` in this case. python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_0/000077500000000000000000000000001511654350100264625ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_0/README.rst000066400000000000000000000001211511654350100301430ustar00rootroot00000000000000Error Handler 0 =============== This is just an error handler for this example. 
python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_0/pyproject.toml000066400000000000000000000021241511654350100313750ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "error-handler-0" dynamic = ["version"] description = "This is just an error handler example package" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ] dependencies = [ "opentelemetry-sdk ~= 1.3", ] [project.entry-points.opentelemetry_error_handler] error_handler_0 = "error_handler_0:ErrorHandler0" [tool.hatch.version] path = "src/error_handler_0/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_0/src/000077500000000000000000000000001511654350100272515ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_0/src/error_handler_0/000077500000000000000000000000001511654350100323165ustar00rootroot00000000000000__init__.py000066400000000000000000000015701511654350100343530ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_0/src/error_handler_0# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from logging import getLogger from opentelemetry.sdk.error_handler import ErrorHandler logger = getLogger(__name__) class ErrorHandler0(ErrorHandler, ZeroDivisionError): def _handle(self, error: Exception, *args, **kwargs): logger.exception("ErrorHandler0 handling a ZeroDivisionError") python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_0/src/error_handler_0/version/000077500000000000000000000000001511654350100340035ustar00rootroot00000000000000__init__.py000066400000000000000000000011431511654350100360340ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_0/src/error_handler_0/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.23.dev0" python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_1/000077500000000000000000000000001511654350100264635ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_1/README.rst000066400000000000000000000001211511654350100301440ustar00rootroot00000000000000Error Handler 1 =============== This is just an error handler for this example. python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_1/pyproject.toml000066400000000000000000000021241511654350100313760ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "error-handler-1" dynamic = ["version"] description = "This is just an error handler example package" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ] dependencies = [ "opentelemetry-sdk ~= 1.3", ] [project.entry-points.opentelemetry_error_handler] error_handler_1 = "error_handler_1:ErrorHandler1" [tool.hatch.version] path = "src/error_handler_1/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_1/src/000077500000000000000000000000001511654350100272525ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_1/src/error_handler_1/000077500000000000000000000000001511654350100323205ustar00rootroot00000000000000__init__.py000066400000000000000000000020651511654350100343550ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_1/src/error_handler_1# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
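# A single error handler can cover several exception types by inheriting
# from each of them; this one registers for IndexError and KeyError and
# tells them apart with isinstance() checks inside _handle().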
from logging import getLogger from opentelemetry.sdk.error_handler import ErrorHandler logger = getLogger(__name__) # pylint: disable=too-many-ancestors class ErrorHandler1(ErrorHandler, IndexError, KeyError): def _handle(self, error: Exception, *args, **kwargs): if isinstance(error, IndexError): logger.exception("ErrorHandler1 handling an IndexError") elif isinstance(error, KeyError): logger.exception("ErrorHandler1 handling a KeyError") python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_1/src/error_handler_1/version/000077500000000000000000000000001511654350100340055ustar00rootroot00000000000000__init__.py000066400000000000000000000011431511654350100360360ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/error_handler/error_handler_1/src/error_handler_1/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.23.dev0" python-opentelemetry-1.39.1/docs/examples/error_handler/example.py000066400000000000000000000010211511654350100254140ustar00rootroot00000000000000from opentelemetry.sdk.error_handler import GlobalErrorHandler # ZeroDivisionError to be handled by ErrorHandler0 with GlobalErrorHandler(): 1 / 0 print() # IndexError to be handled by ErrorHandler1 with GlobalErrorHandler(): [1][2] print() # KeyError to be handled by ErrorHandler1 with GlobalErrorHandler(): {1: 2}[2] print() # AssertionError to be handled by DefaultErrorHandler with GlobalErrorHandler(): assert False print() # No error raised with GlobalErrorHandler(): print("No error raised") python-opentelemetry-1.39.1/docs/examples/fork-process-model/000077500000000000000000000000001511654350100243025ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/fork-process-model/README.rst000066400000000000000000000045411511654350100257750ustar00rootroot00000000000000Working With Fork Process Models ================================ The `BatchSpanProcessor` is not fork-safe and doesn't work well with application servers (Gunicorn, uWSGI) which are based on the pre-fork web server model. The `BatchSpanProcessor` spawns a thread to run in the background to export spans to the telemetry backend. During the fork, the child process inherits the lock which is held by the parent process and deadlock occurs. We can use fork hooks to get around this limitation of the span processor. Please see http://bugs.python.org/issue6721 for the problems about Python locks in (multi)threaded context with fork. The source code for the examples with Flask app are available :scm_web:`here `. Gunicorn post_fork hook ----------------------- .. 
code-block:: python from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor def post_fork(server, worker): server.log.info("Worker spawned (pid: %s)", worker.pid) resource = Resource.create(attributes={ "service.name": "api-service" }) trace.set_tracer_provider(TracerProvider(resource=resource)) span_processor = BatchSpanProcessor( OTLPSpanExporter(endpoint="http://localhost:4317") ) trace.get_tracer_provider().add_span_processor(span_processor) uWSGI postfork decorator ------------------------ .. code-block:: python from uwsgidecorators import postfork from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor @postfork def init_tracing(): resource = Resource.create(attributes={ "service.name": "api-service" }) trace.set_tracer_provider(TracerProvider(resource=resource)) span_processor = BatchSpanProcessor( OTLPSpanExporter(endpoint="http://localhost:4317") ) trace.get_tracer_provider().add_span_processor(span_processor) python-opentelemetry-1.39.1/docs/examples/fork-process-model/flask-gunicorn/000077500000000000000000000000001511654350100272245ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/fork-process-model/flask-gunicorn/README.rst000066400000000000000000000002531511654350100307130ustar00rootroot00000000000000Installation ------------ .. code-block:: sh pip install -rrequirements.txt Run application --------------- .. code-block:: sh gunicorn app -c gunicorn.conf.py python-opentelemetry-1.39.1/docs/examples/fork-process-model/flask-gunicorn/app.py000066400000000000000000000033071511654350100303610ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
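# Flask app served by Gunicorn's pre-fork workers. No tracer provider is
# configured in this module on purpose: it is set up per worker in the
# post_fork hook in gunicorn.conf.py, since BatchSpanProcessor is not
# fork-safe.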
import flask from flask import request from opentelemetry import trace from opentelemetry.instrumentation.flask import FlaskInstrumentor application = flask.Flask(__name__) FlaskInstrumentor().instrument_app(application) tracer = trace.get_tracer(__name__) def fib_slow(n): if n <= 1: return n return fib_slow(n - 1) + fib_fast(n - 2) def fib_fast(n): nth_fib = [0] * (n + 2) nth_fib[1] = 1 for i in range(2, n + 1): nth_fib[i] = nth_fib[i - 1] + nth_fib[i - 2] return nth_fib[n] @application.route("/fibonacci") def fibonacci(): n = int(request.args.get("n", 1)) with tracer.start_as_current_span("root"): with tracer.start_as_current_span("fib_slow") as slow_span: ans = fib_slow(n) slow_span.set_attribute("n", n) slow_span.set_attribute("nth_fibonacci", ans) with tracer.start_as_current_span("fib_fast") as fast_span: ans = fib_fast(n) fast_span.set_attribute("n", n) fast_span.set_attribute("nth_fibonacci", ans) return f"F({n}) is: ({ans})" if __name__ == "__main__": application.run() python-opentelemetry-1.39.1/docs/examples/fork-process-model/flask-gunicorn/gunicorn.conf.py000066400000000000000000000053541511654350100323550ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry import metrics, trace from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import ( OTLPMetricExporter, ) from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor bind = "127.0.0.1:8000" # Sample Worker processes workers = 4 worker_class = "sync" worker_connections = 1000 timeout = 30 keepalive = 2 # Sample logging errorlog = "-" loglevel = "info" accesslog = "-" access_log_format = ( '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' ) def post_fork(server, worker): server.log.info("Worker spawned (pid: %s)", worker.pid) resource = Resource.create( attributes={ "service.name": "api-service", # If workers are not distinguished within attributes, traces and # metrics exported from each worker will be indistinguishable. While # not necessarily an issue for traces, it is confusing for almost # all metric types. A built-in way to identify a worker is by PID # but this may lead to high label cardinality. An alternative # workaround and additional discussion are available here: # https://github.com/benoitc/gunicorn/issues/1352 "worker": worker.pid, } ) trace.set_tracer_provider(TracerProvider(resource=resource)) # This uses insecure connection for the purpose of example. Please see the # OTLP Exporter documentation for other options. 
span_processor = BatchSpanProcessor( OTLPSpanExporter(endpoint="http://localhost:4317", insecure=True) ) trace.get_tracer_provider().add_span_processor(span_processor) reader = PeriodicExportingMetricReader( OTLPMetricExporter(endpoint="http://localhost:4317") ) metrics.set_meter_provider( MeterProvider( resource=resource, metric_readers=[reader], ) ) python-opentelemetry-1.39.1/docs/examples/fork-process-model/flask-gunicorn/requirements.txt000066400000000000000000000006741511654350100325170ustar00rootroot00000000000000click==8.1.7 Flask==2.3.3 googleapis-common-protos==1.52.0 grpcio==1.56.2 gunicorn==22.0.0 itsdangerous==2.1.2 Jinja2==3.1.6 MarkupSafe==2.1.3 opentelemetry-api==1.20.0 opentelemetry-exporter-otlp==1.20.0 opentelemetry-instrumentation==0.41b0 opentelemetry-instrumentation-flask==0.41b0 opentelemetry-instrumentation-wsgi==0.41b0 opentelemetry-sdk==1.20.0 protobuf==3.20.3 six==1.15.0 thrift==0.13.0 uWSGI==2.0.22 Werkzeug==3.0.6 wrapt==1.16.0 python-opentelemetry-1.39.1/docs/examples/fork-process-model/flask-uwsgi/000077500000000000000000000000001511654350100265365ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/fork-process-model/flask-uwsgi/README.rst000066400000000000000000000003421511654350100302240ustar00rootroot00000000000000Installation ------------ .. code-block:: sh pip install -rrequirements.txt Run application --------------- .. code-block:: sh uwsgi --http :8000 --wsgi-file app.py --callable application --master --enable-threads python-opentelemetry-1.39.1/docs/examples/fork-process-model/flask-uwsgi/app.py000066400000000000000000000047271511654350100277020ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import flask from flask import request from uwsgidecorators import postfork from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.instrumentation.flask import FlaskInstrumentor from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor application = flask.Flask(__name__) FlaskInstrumentor().instrument_app(application) tracer = trace.get_tracer(__name__) @postfork def init_tracing(): resource = Resource.create(attributes={"service.name": "api-service"}) trace.set_tracer_provider(TracerProvider(resource=resource)) # This uses insecure connection for the purpose of example. Please see the # OTLP Exporter documentation for other options. 
span_processor = BatchSpanProcessor( OTLPSpanExporter(endpoint="http://localhost:4317", insecure=True) ) trace.get_tracer_provider().add_span_processor(span_processor) def fib_slow(n): if n <= 1: return n return fib_slow(n - 1) + fib_fast(n - 2) def fib_fast(n): nth_fib = [0] * (n + 2) nth_fib[1] = 1 for i in range(2, n + 1): nth_fib[i] = nth_fib[i - 1] + nth_fib[i - 2] return nth_fib[n] @application.route("/fibonacci") def fibonacci(): n = int(request.args.get("n", 1)) with tracer.start_as_current_span("root"): with tracer.start_as_current_span("fib_slow") as slow_span: ans = fib_slow(n) slow_span.set_attribute("n", n) slow_span.set_attribute("nth_fibonacci", ans) with tracer.start_as_current_span("fib_fast") as fast_span: ans = fib_fast(n) fast_span.set_attribute("n", n) fast_span.set_attribute("nth_fibonacci", ans) return f"F({n}) is: ({ans})" if __name__ == "__main__": application.run() python-opentelemetry-1.39.1/docs/examples/fork-process-model/flask-uwsgi/requirements.txt000066400000000000000000000006531511654350100320260ustar00rootroot00000000000000click==8.1.7 Flask==2.3.3 googleapis-common-protos==1.52.0 grpcio==1.56.2 itsdangerous==2.1.2 Jinja2==3.1.6 MarkupSafe==2.1.3 opentelemetry-api==1.20.0 opentelemetry-exporter-otlp==1.20.0 opentelemetry-instrumentation==0.41b0 opentelemetry-instrumentation-flask==0.41b0 opentelemetry-instrumentation-wsgi==0.41b0 opentelemetry-sdk==1.20.0 protobuf==3.20.3 six==1.15.0 thrift==0.13.0 uWSGI==2.0.22 Werkzeug==3.0.6 wrapt==1.16.0 python-opentelemetry-1.39.1/docs/examples/index.rst000066400000000000000000000001161511654350100224260ustar00rootroot00000000000000:orphan: Examples ======== .. toctree:: :maxdepth: 1 :glob: ** python-opentelemetry-1.39.1/docs/examples/logs/000077500000000000000000000000001511654350100215335ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/logs/README.rst000066400000000000000000000071631511654350100232310ustar00rootroot00000000000000OpenTelemetry Logs SDK ====================== .. warning:: OpenTelemetry Python logs are in an experimental state. The APIs within :mod:`opentelemetry.sdk._logs` are subject to change in minor/patch releases and make no backward compatibility guarantees at this time. The source files of these examples are available :scm_web:`here `. Start the Collector locally to see data being exported. Write the following file: .. code-block:: yaml # otel-collector-config.yaml receivers: otlp: protocols: grpc: endpoint: 0.0.0.0:4317 exporters: debug: verbosity: detailed service: pipelines: logs: receivers: [otlp] exporters: [debug] traces: receivers: [otlp] exporters: [debug] Then start the Docker container: .. code-block:: sh docker run \ -p 4317:4317 \ -v $(pwd)/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml \ otel/opentelemetry-collector-contrib:latest .. code-block:: sh $ python example.py The resulting logs will appear in the output from the collector and look similar to this: .. code-block:: sh ResourceLog #0 Resource SchemaURL: Resource attributes: -> telemetry.sdk.language: Str(python) -> telemetry.sdk.name: Str(opentelemetry) -> telemetry.sdk.version: Str(1.33.0.dev0) -> service.name: Str(shoppingcart) -> service.instance.id: Str(instance-12) ScopeLogs #0 ScopeLogs SchemaURL: InstrumentationScope myapp.area2 LogRecord #0 ObservedTimestamp: 2025-04-22 12:16:57.315179 +0000 UTC Timestamp: 2025-04-22 12:16:57.315152896 +0000 UTC SeverityText: WARN SeverityNumber: Warn(13) Body: Str(Jail zesty vixen who grabbed pay from quack.) 
Attributes: -> code.filepath: Str(/Users/jayclifford/Repos/opentelemetry-python/docs/examples/logs/example.py) -> code.function: Str() -> code.lineno: Int(47) Trace ID: Span ID: Flags: 0 LogRecord #1 ObservedTimestamp: 2025-04-22 12:16:57.31522 +0000 UTC Timestamp: 2025-04-22 12:16:57.315213056 +0000 UTC SeverityText: ERROR SeverityNumber: Error(17) Body: Str(The five boxing wizards jump quickly.) Attributes: -> code.filepath: Str(/Users/jayclifford/Repos/opentelemetry-python/docs/examples/logs/example.py) -> code.function: Str() -> code.lineno: Int(48) Trace ID: Span ID: Flags: 0 LogRecord #2 ObservedTimestamp: 2025-04-22 12:16:57.315445 +0000 UTC Timestamp: 2025-04-22 12:16:57.31543808 +0000 UTC SeverityText: ERROR SeverityNumber: Error(17) Body: Str(Hyderabad, we have a major problem.) Attributes: -> code.filepath: Str(/Users/jayclifford/Repos/opentelemetry-python/docs/examples/logs/example.py) -> code.function: Str() -> code.lineno: Int(61) Trace ID: 8a6739fffce895e694700944e2faf23e Span ID: a45337020100cb63 Flags: 1 ScopeLogs #1 ScopeLogs SchemaURL: InstrumentationScope myapp.area1 LogRecord #0 ObservedTimestamp: 2025-04-22 12:16:57.315242 +0000 UTC Timestamp: 2025-04-22 12:16:57.315234048 +0000 UTC SeverityText: ERROR SeverityNumber: Error(17) Body: Str(I have custom attributes.) Attributes: -> user_id: Str(user-123) -> code.filepath: Str(/Users/jayclifford/Repos/opentelemetry-python/docs/examples/logs/example.py) -> code.function: Str() -> code.lineno: Int(53) Trace ID: Span ID: Flags: 0 python-opentelemetry-1.39.1/docs/examples/logs/example.py000066400000000000000000000041241511654350100235410ustar00rootroot00000000000000import logging from opentelemetry import trace from opentelemetry._logs import set_logger_provider from opentelemetry.exporter.otlp.proto.grpc._log_exporter import ( OTLPLogExporter, ) from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler from opentelemetry.sdk._logs.export import BatchLogRecordProcessor from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ConsoleSpanExporter, ) trace.set_tracer_provider(TracerProvider()) trace.get_tracer_provider().add_span_processor( BatchSpanProcessor(ConsoleSpanExporter()) ) logger_provider = LoggerProvider( resource=Resource.create( { "service.name": "shoppingcart", "service.instance.id": "instance-12", } ), ) set_logger_provider(logger_provider) exporter = OTLPLogExporter(insecure=True) logger_provider.add_log_record_processor(BatchLogRecordProcessor(exporter)) handler = LoggingHandler(level=logging.NOTSET, logger_provider=logger_provider) # Set the root logger level to NOTSET to ensure all messages are captured logging.getLogger().setLevel(logging.NOTSET) # Attach OTLP handler to root logger logging.getLogger().addHandler(handler) # Create different namespaced loggers # It is recommended to not use the root logger with OTLP handler # so telemetry is collected only for the application logger1 = logging.getLogger("myapp.area1") logger2 = logging.getLogger("myapp.area2") logger1.debug("Quick zephyrs blow, vexing daft Jim.") logger1.info("How quickly daft jumping zebras vex.") logger2.warning("Jail zesty vixen who grabbed pay from quack.") logger2.error("The five boxing wizards jump quickly.") # Log custom attributes # Custom attributes are added on a per event basis user_id = "user-123" logger1.error("I have custom attributes.", extra={"user_id": user_id}) # Trace context correlation tracer = 
trace.get_tracer(__name__) with tracer.start_as_current_span("foo"): # Do something logger2.error("Hyderabad, we have a major problem.") logger_provider.shutdown() python-opentelemetry-1.39.1/docs/examples/logs/otel-collector-config.yaml000066400000000000000000000004541511654350100266140ustar00rootroot00000000000000receivers: otlp: protocols: grpc: endpoint: 0.0.0.0:4317 exporters: debug: verbosity: detailed service: pipelines: logs: receivers: [otlp] exporters: [debug] traces: receivers: [otlp] exporters: [debug]python-opentelemetry-1.39.1/docs/examples/metrics/000077500000000000000000000000001511654350100222355ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/metrics/instruments/000077500000000000000000000000001511654350100246305ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/metrics/instruments/README.rst000066400000000000000000000040321511654350100263160ustar00rootroot00000000000000OpenTelemetry Metrics SDK ========================= The source files of these examples are available :scm_web:`here `. Start the Collector locally to see data being exported. Write the following file: .. code-block:: yaml # otel-collector-config.yaml receivers: otlp: protocols: grpc: endpoint: 0.0.0.0:4317 exporters: debug: service: pipelines: metrics: receivers: [otlp] exporters: [debug] Then start the Docker container: .. code-block:: sh docker run \ -p 4317:4317 \ -v $(pwd)/otel-collector-config.yaml:/etc/otel/config.yaml \ otel/opentelemetry-collector-contrib:latest .. code-block:: sh $ python example.py The resulting metrics will appear in the output from the collector and look similar to this: .. code-block:: sh ScopeMetrics #0 ScopeMetrics SchemaURL: InstrumentationScope getting-started 0.1.2 Metric #0 Descriptor: -> Name: counter -> Description: -> Unit: -> DataType: Sum -> IsMonotonic: true -> AggregationTemporality: Cumulative NumberDataPoints #0 StartTimestamp: 2024-08-09 11:21:42.145179 +0000 UTC Timestamp: 2024-08-09 11:21:42.145325 +0000 UTC Value: 1 Metric #1 Descriptor: -> Name: updown_counter -> Description: -> Unit: -> DataType: Sum -> IsMonotonic: false -> AggregationTemporality: Cumulative NumberDataPoints #0 StartTimestamp: 2024-08-09 11:21:42.145202 +0000 UTC Timestamp: 2024-08-09 11:21:42.145325 +0000 UTC Value: -4 Metric #2 Descriptor: -> Name: histogram -> Description: -> Unit: -> DataType: Histogram -> AggregationTemporality: Cumulative HistogramDataPoints #0 StartTimestamp: 2024-08-09 11:21:42.145221 +0000 UTC Timestamp: 2024-08-09 11:21:42.145325 +0000 UTC Count: 1 python-opentelemetry-1.39.1/docs/examples/metrics/instruments/example.py000066400000000000000000000034421511654350100266400ustar00rootroot00000000000000from typing import Iterable from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import ( OTLPMetricExporter, ) from opentelemetry.metrics import ( CallbackOptions, Observation, get_meter_provider, set_meter_provider, ) from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader exporter = OTLPMetricExporter(insecure=True) reader = PeriodicExportingMetricReader(exporter) provider = MeterProvider(metric_readers=[reader]) set_meter_provider(provider) def observable_counter_func(options: CallbackOptions) -> Iterable[Observation]: yield Observation(1, {}) def observable_up_down_counter_func( options: CallbackOptions, ) -> Iterable[Observation]: yield Observation(-10, {}) def observable_gauge_func(options: CallbackOptions) -> Iterable[Observation]: yield 
Observation(9, {}) meter = get_meter_provider().get_meter("getting-started", "0.1.2") # Counter counter = meter.create_counter("counter") counter.add(1) # Async Counter observable_counter = meter.create_observable_counter( "observable_counter", [observable_counter_func], ) # UpDownCounter updown_counter = meter.create_up_down_counter("updown_counter") updown_counter.add(1) updown_counter.add(-5) # Async UpDownCounter observable_updown_counter = meter.create_observable_up_down_counter( "observable_updown_counter", [observable_up_down_counter_func] ) # Histogram histogram = meter.create_histogram("histogram") histogram.record(99.9) # Histogram with explicit bucket boundaries advisory histogram = meter.create_histogram( "histogram_with_advisory", explicit_bucket_boundaries_advisory=[0.0, 1.0, 2.0], ) histogram.record(99.9) # Async Gauge gauge = meter.create_observable_gauge("gauge", [observable_gauge_func]) python-opentelemetry-1.39.1/docs/examples/metrics/instruments/otel-collector-config.yaml000066400000000000000000000003371511654350100317110ustar00rootroot00000000000000receivers: otlp: protocols: grpc: endpoint: 0.0.0.0:4317 exporters: debug: service: pipelines: metrics: receivers: [otlp] exporters: [debug] python-opentelemetry-1.39.1/docs/examples/metrics/instruments/requirements.txt000066400000000000000000000001221511654350100301070ustar00rootroot00000000000000opentelemetry-api~=1.25 opentelemetry-sdk~=1.25 opentelemetry-exporter-otlp~=1.25 python-opentelemetry-1.39.1/docs/examples/metrics/prometheus-grafana/000077500000000000000000000000001511654350100260255ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/metrics/prometheus-grafana/README.rst000066400000000000000000000042531511654350100275200ustar00rootroot00000000000000Prometheus Instrumentation ========================== This shows how to use ``opentelemetry-exporter-prometheus`` to automatically generate Prometheus metrics. The source files of these examples are available :scm_web:`here `. Preparation ----------- This example will be executed in a separate virtual environment: .. code-block:: $ mkdir prometheus_auto_instrumentation $ virtualenv prometheus_auto_instrumentation $ source prometheus_auto_instrumentation/bin/activate Installation ------------ .. code-block:: $ pip install -r requirements.txt Execution --------- .. code-block:: $ python ./prometheus-monitor.py $ Server is running at http://localhost:8000 Now you can visit http://localhost:8000/metrics to see Prometheus metrics. You should see something like: .. 
code-block:: # HELP python_gc_objects_collected_total Objects collected during gc # TYPE python_gc_objects_collected_total counter python_gc_objects_collected_total{generation="0"} 320.0 python_gc_objects_collected_total{generation="1"} 58.0 python_gc_objects_collected_total{generation="2"} 0.0 # HELP python_gc_objects_uncollectable_total Uncollectable objects found during GC # TYPE python_gc_objects_uncollectable_total counter python_gc_objects_uncollectable_total{generation="0"} 0.0 python_gc_objects_uncollectable_total{generation="1"} 0.0 python_gc_objects_uncollectable_total{generation="2"} 0.0 # HELP python_gc_collections_total Number of times this generation was collected # TYPE python_gc_collections_total counter python_gc_collections_total{generation="0"} 61.0 python_gc_collections_total{generation="1"} 5.0 python_gc_collections_total{generation="2"} 0.0 # HELP python_info Python platform information # TYPE python_info gauge python_info{implementation="CPython",major="3",minor="8",patchlevel="5",version="3.8.5"} 1.0 # HELP MyAppPrefix_my_counter_total # TYPE MyAppPrefix_my_counter_total counter MyAppPrefix_my_counter_total 964.0 ``MyAppPrefix_my_counter_total`` is the custom counter created in the application with the custom prefix ``MyAppPrefix``. python-opentelemetry-1.39.1/docs/examples/metrics/prometheus-grafana/prometheus-monitor.py000066400000000000000000000014651511654350100322650ustar00rootroot00000000000000import random import time from prometheus_client import start_http_server from opentelemetry.exporter.prometheus import PrometheusMetricReader from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import MeterProvider # Start Prometheus client start_http_server(port=8000, addr="localhost") # Exporter to export metrics to Prometheus prefix = "MyAppPrefix" reader = PrometheusMetricReader(prefix) # Meter is responsible for creating and recording metrics set_meter_provider(MeterProvider(metric_readers=[reader])) meter = get_meter_provider().get_meter("view-name-change", "0.1.2") my_counter = meter.create_counter("my.counter") print("Server is running at http://localhost:8000") while 1: my_counter.add(random.randint(1, 10)) time.sleep(random.random()) python-opentelemetry-1.39.1/docs/examples/metrics/prometheus-grafana/requirements.txt000066400000000000000000000000761511654350100313140ustar00rootroot00000000000000opentelemetry-exporter-prometheus==1.12.0rc1 protobuf~=3.18.1 python-opentelemetry-1.39.1/docs/examples/metrics/reader/000077500000000000000000000000001511654350100234775ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/metrics/reader/README.rst000066400000000000000000000021361511654350100251700ustar00rootroot00000000000000MetricReader configuration scenarios ==================================== These examples show how to customize the metrics that are output by the SDK using configuration on metric readers. There are multiple examples: * preferred_aggregation.py: Shows how to configure the preferred aggregation for metric instrument types. * preferred_temporality.py: Shows how to configure the preferred temporality for metric instrument types. * preferred_exemplarfilter.py: Shows how to configure the exemplar filter. * synchronous_gauge_read.py: Shows how to use `PeriodicExportingMetricReader` in a synchronous manner to explicitly control the collection of metrics. The source files of these examples are available :scm_web:`here `. Installation ------------ .. 
code-block:: sh pip install -r requirements.txt Run the Example --------------- .. code-block:: sh python <example_name>.py The output will be shown in the console. Useful links ------------ - OpenTelemetry_ - :doc:`../../../api/metrics` .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ python-opentelemetry-1.39.1/docs/examples/metrics/reader/preferred_aggregation.py000066400000000000000000000034061511654350100304010ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import Counter, MeterProvider from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) from opentelemetry.sdk.metrics.view import LastValueAggregation aggregation_last_value = {Counter: LastValueAggregation()} # Use console exporter for the example exporter = ConsoleMetricExporter( preferred_aggregation=aggregation_last_value, ) # The PeriodicExportingMetricReader takes the preferred aggregation # from the passed in exporter reader = PeriodicExportingMetricReader( exporter, export_interval_millis=5_000, ) provider = MeterProvider(metric_readers=[reader]) set_meter_provider(provider) meter = get_meter_provider().get_meter("preferred-aggregation", "0.1.2") counter = meter.create_counter("my-counter") # A counter normally would have an aggregation type of SumAggregation, # in which its value would be determined by a cumulative sum. # In this example, the counter is configured with the LastValueAggregation, # which will only hold the most recent value. for x in range(10): counter.add(x) time.sleep(2.0) python-opentelemetry-1.39.1/docs/examples/metrics/reader/preferred_exemplarfilter.py000066400000000000000000000042741511654350100311400ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time from opentelemetry import trace from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics._internal.exemplar import AlwaysOnExemplarFilter from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) from opentelemetry.sdk.trace import TracerProvider # Create an ExemplarFilter instance # Available values are AlwaysOffExemplarFilter, AlwaysOnExemplarFilter # and TraceBasedExemplarFilter. # The default value is `TraceBasedExemplarFilter`.
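# In short: AlwaysOnExemplarFilter samples every measurement,
# AlwaysOffExemplarFilter samples none, and TraceBasedExemplarFilter samples
# only measurements recorded while a sampled span is active.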
# # You can also use the environment variable `OTEL_METRICS_EXEMPLAR_FILTER` # to change the default value. # # You can also define your own filter by implementing the abstract class # `ExemplarFilter` exemplar_filter = AlwaysOnExemplarFilter() exporter = ConsoleMetricExporter() reader = PeriodicExportingMetricReader( exporter, export_interval_millis=5_000, ) # Set up the MeterProvider with the ExemplarFilter provider = MeterProvider( metric_readers=[reader], exemplar_filter=exemplar_filter, # Pass the ExemplarFilter to the MeterProvider ) set_meter_provider(provider) meter = get_meter_provider().get_meter("exemplar-filter-example", "0.1.2") counter = meter.create_counter("my-counter") # Create a trace and span as the default exemplar filter `TraceBasedExemplarFilter` # will only store exemplar if a context exists trace.set_tracer_provider(TracerProvider()) tracer = trace.get_tracer(__name__) with tracer.start_as_current_span("foo"): for value in range(10): counter.add(value) time.sleep(2.0) python-opentelemetry-1.39.1/docs/examples/metrics/reader/preferred_temporality.py000066400000000000000000000045071511654350100304660ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import Counter, MeterProvider from opentelemetry.sdk.metrics.export import ( AggregationTemporality, ConsoleMetricExporter, PeriodicExportingMetricReader, ) temporality_cumulative = {Counter: AggregationTemporality.CUMULATIVE} temporality_delta = {Counter: AggregationTemporality.DELTA} # Use console exporters for the example # The metrics that are exported using this exporter will represent a cumulative value exporter = ConsoleMetricExporter( preferred_temporality=temporality_cumulative, ) # The metrics that are exported using this exporter will represent a delta value exporter2 = ConsoleMetricExporter( preferred_temporality=temporality_delta, ) # The PeriodicExportingMetricReader takes the preferred aggregation # from the passed in exporter reader = PeriodicExportingMetricReader( exporter, export_interval_millis=5_000, ) # The PeriodicExportingMetricReader takes the preferred aggregation # from the passed in exporter reader2 = PeriodicExportingMetricReader( exporter2, export_interval_millis=5_000, ) provider = MeterProvider(metric_readers=[reader, reader2]) set_meter_provider(provider) meter = get_meter_provider().get_meter("preferred-temporality", "0.1.2") counter = meter.create_counter("my-counter") # Two metrics are expected to be printed to the console per export interval. # The metric originating from the metric exporter with a preferred temporality # of cumulative will keep a running sum of all values added. # The metric originating from the metric exporter with a preferred temporality # of delta will have the sum value reset each export interval. 
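# As a rough worked example with the code below (export every 5s, adds 10s
# apart): the export after add(5) reports 5 from both exporters; the export
# after add(20) reports 25 (5 + 20) from the cumulative exporter but only 20
# from the delta exporter, whose sum is reset at each export.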
counter.add(5) time.sleep(10) counter.add(20) python-opentelemetry-1.39.1/docs/examples/metrics/reader/requirements.txt000066400000000000000000000002061511654350100267610ustar00rootroot00000000000000opentelemetry-api==1.15.0 opentelemetry-sdk==1.15.0 opentelemetry-semantic-conventions==0.36b0 typing_extensions==4.5.0 wrapt==1.14.1 python-opentelemetry-1.39.1/docs/examples/metrics/reader/synchronous_gauge_read.py000066400000000000000000000054241511654350100306130ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Iterable from opentelemetry.metrics import ( CallbackOptions, Observation, get_meter_provider, set_meter_provider, ) from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) temperature = 0.0 humidity = 0.0 # Function called by the gauge to read the temperature def read_temperature(options: CallbackOptions) -> Iterable[Observation]: global temperature yield Observation(value=temperature, attributes={"room": "living-room"}) # Function called by the gauge to read the humidity def read_humidity(options: CallbackOptions) -> Iterable[Observation]: global humidity yield Observation(value=humidity, attributes={"room": "living-room"}) # Use console exporter for the example exporter = ConsoleMetricExporter() # If the export interval is set to math.inf, the PeriodicExportingMetricReader # will not invoke periodic collection reader = PeriodicExportingMetricReader( exporter, export_interval_millis=math.inf, ) provider = MeterProvider(metric_readers=[reader]) set_meter_provider(provider) meter = get_meter_provider().get_meter("synchronous_read", "0.1.2") gauge = meter.create_observable_gauge( name="synchronous_gauge_temperature", description="Gauge value captured synchronously", callbacks=[read_temperature], ) # Simulate synchronous reading of temperature print("--- Simulating synchronous reading of temperature ---", flush=True) temperature = 25.0 reader.collect() # Note: The reader will only collect the last value before `collect` is called print("--- Last value only ---", flush=True) temperature = 30.0 temperature = 35.0 reader.collect() # Invoking `collect` will read all measurements assigned to the reader gauge2 = meter.create_observable_gauge( name="synchronous_gauge_humidity", description="Gauge value captured synchronously", callbacks=[read_humidity], ) print("--- Multiple Measurements ---", flush=True) temperature = 20.0 humidity = 50.0 reader.collect() # Invoking `force_flush` will read all measurements assigned to the reader print("--- Invoking force_flush ---", flush=True) provider.force_flush() python-opentelemetry-1.39.1/docs/examples/metrics/views/000077500000000000000000000000001511654350100233725ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/metrics/views/README.rst000066400000000000000000000020301511654350100250560ustar00rootroot00000000000000View common scenarios
===================== These examples show how to customize the metrics that are output by the SDK using Views. There are multiple examples: * change_aggregation.py: Shows how to change the default aggregation for an instrument. * change_name.py: Shows how to change the name of a metric. * limit_num_of_attrs.py: Shows how to limit the number of attributes that are output for a metric. * drop_metrics_from_instrument.py: Shows how to drop measurements from an instrument. * change_reservoir_factory.py: Shows how to use your own ``ExemplarReservoir`` The source files of these examples are available :scm_web:`here `. Installation ------------ .. code-block:: sh pip install -r requirements.txt Run the Example --------------- .. code-block:: sh python <example_name>.py The output will be shown in the console. Useful links ------------ - OpenTelemetry_ - :doc:`../../../api/metrics` .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ python-opentelemetry-1.39.1/docs/examples/metrics/views/change_aggregation.py000066400000000000000000000033031511654350100275370ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import time from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) from opentelemetry.sdk.metrics.view import SumAggregation, View # Create a view matching the histogram instrument name `http.client.request.latency` # and configure the `SumAggregation` for the result metrics stream hist_to_sum_view = View( instrument_name="http.client.request.latency", aggregation=SumAggregation() ) # Use console exporter for the example exporter = ConsoleMetricExporter() # Create a metric reader with stdout exporter reader = PeriodicExportingMetricReader(exporter, export_interval_millis=1_000) provider = MeterProvider( metric_readers=[ reader, ], views=[ hist_to_sum_view, ], ) set_meter_provider(provider) meter = get_meter_provider().get_meter("view-change-aggregation", "0.1.2") histogram = meter.create_histogram("http.client.request.latency") while 1: histogram.record(99.9) time.sleep(random.random()) python-opentelemetry-1.39.1/docs/examples/metrics/views/change_name.py000066400000000000000000000032771511654350100262020ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
import random import time from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import Counter, MeterProvider from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) from opentelemetry.sdk.metrics.view import View # Create a view matching the counter instrument `my.counter` # and configure the new name `my.counter.total` for the result metrics stream change_metric_name_view = View( instrument_type=Counter, instrument_name="my.counter", name="my.counter.total", ) # Use console exporter for the example exporter = ConsoleMetricExporter() # Create a metric reader with stdout exporter reader = PeriodicExportingMetricReader(exporter, export_interval_millis=1_000) provider = MeterProvider( metric_readers=[ reader, ], views=[ change_metric_name_view, ], ) set_meter_provider(provider) meter = get_meter_provider().get_meter("view-name-change", "0.1.2") my_counter = meter.create_counter("my.counter") while 1: my_counter.add(random.randint(1, 10)) time.sleep(random.random()) python-opentelemetry-1.39.1/docs/examples/metrics/views/change_reservoir_factory.py000066400000000000000000000057631511654350100310330ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
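# Background for this example: exemplars are sample raw measurements (with
# any active trace context) attached to exported metric data points, and an
# ExemplarReservoir decides which measurements are kept. The custom factory
# below keeps the bucket-aligned reservoir for explicit-bucket histograms and
# swaps in a SimpleFixedSizeExemplarReservoir holding 10 samples for all
# other aggregations.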
import random import time from typing import Type from opentelemetry import trace from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics._internal.aggregation import ( DefaultAggregation, _Aggregation, _ExplicitBucketHistogramAggregation, ) from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir, ) from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) from opentelemetry.sdk.metrics.view import View from opentelemetry.sdk.trace import TracerProvider # Create a custom reservoir factory with specified parameters def custom_reservoir_factory( aggregationType: Type[_Aggregation], ) -> ExemplarReservoirBuilder: if issubclass(aggregationType, _ExplicitBucketHistogramAggregation): return AlignedHistogramBucketExemplarReservoir else: # Custom reservoir must accept `**kwargs` that may set the `size` for # _ExponentialBucketHistogramAggregation or the `boundaries` for # _ExplicitBucketHistogramAggregation return lambda **kwargs: SimpleFixedSizeExemplarReservoir( size=10, **{k: v for k, v in kwargs.items() if k != "size"}, ) # Create a view with the custom reservoir factory change_reservoir_factory_view = View( instrument_name="my.counter", name="name", aggregation=DefaultAggregation(), exemplar_reservoir_factory=custom_reservoir_factory, ) # Use console exporter for the example exporter = ConsoleMetricExporter() # Create a metric reader with stdout exporter reader = PeriodicExportingMetricReader(exporter, export_interval_millis=1_000) provider = MeterProvider( metric_readers=[ reader, ], views=[ change_reservoir_factory_view, ], ) set_meter_provider(provider) meter = get_meter_provider().get_meter("reservoir-factory-change", "0.1.2") my_counter = meter.create_counter("my.counter") # Create a trace and span as the default exemplar filter `TraceBasedExemplarFilter` # will only store exemplar if a context exists trace.set_tracer_provider(TracerProvider()) tracer = trace.get_tracer(__name__) with tracer.start_as_current_span("foo"): while 1: my_counter.add(random.randint(1, 10)) time.sleep(random.random()) python-opentelemetry-1.39.1/docs/examples/metrics/views/disable_default_aggregation.py000066400000000000000000000032531511654350100314250ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import time from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) from opentelemetry.sdk.metrics.view import ( DropAggregation, SumAggregation, View, ) # disable_default_aggregation. 
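# A view with instrument_name="*" matches every instrument, so combining it
# with DropAggregation drops all metrics by default; only instruments matched
# by a more specific view (here, the SumAggregation view for "mycounter"
# below) are still exported.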
disable_default_aggregation = View( instrument_name="*", aggregation=DropAggregation() ) exporter = ConsoleMetricExporter() reader = PeriodicExportingMetricReader(exporter, export_interval_millis=1_000) provider = MeterProvider( metric_readers=[ reader, ], views=[ disable_default_aggregation, View(instrument_name="mycounter", aggregation=SumAggregation()), ], ) set_meter_provider(provider) meter = get_meter_provider().get_meter( "view-disable-default-aggregation", "0.1.2" ) # Create a view to configure aggregation specific for this counter. my_counter = meter.create_counter("mycounter") while 1: my_counter.add(random.randint(1, 10)) time.sleep(random.random()) python-opentelemetry-1.39.1/docs/examples/metrics/views/drop_metrics_from_instrument.py000066400000000000000000000031461511654350100317550ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import time from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import Counter, MeterProvider from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) from opentelemetry.sdk.metrics.view import DropAggregation, View # Create a view matching the counter instrument `my.counter` # and configure the view to drop the aggregation. drop_aggregation_view = View( instrument_type=Counter, instrument_name="my.counter", aggregation=DropAggregation(), ) exporter = ConsoleMetricExporter() reader = PeriodicExportingMetricReader(exporter, export_interval_millis=1_000) provider = MeterProvider( metric_readers=[ reader, ], views=[ drop_aggregation_view, ], ) set_meter_provider(provider) meter = get_meter_provider().get_meter("view-drop-aggregation", "0.1.2") my_counter = meter.create_counter("my.counter") while 1: my_counter.add(random.randint(1, 10)) time.sleep(random.random()) python-opentelemetry-1.39.1/docs/examples/metrics/views/limit_num_of_attrs.py000066400000000000000000000040011511654350100276350ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
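# Note on this example: the gauge callback below yields observations carrying
# up to 100 random attribute keys, which would produce a high-cardinality
# metric stream; the view's attribute_keys setting keeps only "k_3" and "k_5"
# in the exported stream.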
import random import time from typing import Iterable from opentelemetry.metrics import ( CallbackOptions, Observation, get_meter_provider, set_meter_provider, ) from opentelemetry.sdk.metrics import MeterProvider, ObservableGauge from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) from opentelemetry.sdk.metrics.view import View # Create a view matching the observable gauge instrument `observable_gauge` # and configure the attributes in the result metric stream # to contain only the attributes with keys with `k_3` and `k_5` view_with_attributes_limit = View( instrument_type=ObservableGauge, instrument_name="observable_gauge", attribute_keys={"k_3", "k_5"}, ) exporter = ConsoleMetricExporter() reader = PeriodicExportingMetricReader(exporter, export_interval_millis=1_000) provider = MeterProvider( metric_readers=[ reader, ], views=[ view_with_attributes_limit, ], ) set_meter_provider(provider) meter = get_meter_provider().get_meter("reduce-cardinality-with-view", "0.1.2") def observable_gauge_func(options: CallbackOptions) -> Iterable[Observation]: attrs = {} for i in range(random.randint(1, 100)): attrs[f"k_{i}"] = f"v_{i}" yield Observation(1, attrs) # Async gauge observable_gauge = meter.create_observable_gauge( "observable_gauge", [observable_gauge_func], ) while 1: time.sleep(1) python-opentelemetry-1.39.1/docs/examples/metrics/views/requirements.txt000066400000000000000000000002061511654350100266540ustar00rootroot00000000000000opentelemetry-api==1.12.0 opentelemetry-sdk==1.12.0 opentelemetry-semantic-conventions==0.33b0 typing_extensions==4.5.0 wrapt==1.14.1 python-opentelemetry-1.39.1/docs/examples/opencensus-exporter-tracer/000077500000000000000000000000001511654350100260755ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/opencensus-exporter-tracer/README.rst000066400000000000000000000023461511654350100275710ustar00rootroot00000000000000OpenCensus Exporter =================== This example shows how to use the OpenCensus Exporter to export traces to the OpenTelemetry collector. The source files of this example are available :scm_web:`here `. Installation ------------ .. code-block:: sh pip install opentelemetry-api pip install opentelemetry-sdk pip install opentelemetry-exporter-opencensus Run the Example --------------- Before running the example, it's necessary to run the OpenTelemetry collector and Jaeger. The :scm_web:`docker ` folder contains a ``docker-compose`` template with the configuration of those services. .. code-block:: sh pip install docker-compose cd docker docker-compose up Now, the example can be executed: .. code-block:: sh python collector.py The traces are available in the Jaeger UI at http://localhost:16686/. Useful links ------------ - OpenTelemetry_ - `OpenTelemetry Collector`_ - :doc:`../../api/trace` - :doc:`../../exporter/opencensus/opencensus` .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ .. _OpenTelemetry Collector: https://github.com/open-telemetry/opentelemetry-collector python-opentelemetry-1.39.1/docs/examples/opencensus-exporter-tracer/collector.py000066400000000000000000000023731511654350100304420ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry import trace from opentelemetry.exporter.opencensus.trace_exporter import ( OpenCensusSpanExporter, ) from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor exporter = OpenCensusSpanExporter(endpoint="localhost:55678") trace.set_tracer_provider(TracerProvider()) tracer = trace.get_tracer(__name__) span_processor = BatchSpanProcessor(exporter) trace.get_tracer_provider().add_span_processor(span_processor) with tracer.start_as_current_span("foo"): with tracer.start_as_current_span("bar"): with tracer.start_as_current_span("baz"): print("Hello world from OpenTelemetry Python!") python-opentelemetry-1.39.1/docs/examples/opencensus-exporter-tracer/docker/000077500000000000000000000000001511654350100273445ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/opencensus-exporter-tracer/docker/collector-config.yaml000066400000000000000000000005061511654350100334620ustar00rootroot00000000000000receivers: opencensus: endpoint: "0.0.0.0:55678" exporters: jaeger_grpc: endpoint: jaeger-all-in-one:14250 sending_queue: batch: debug: processors: queued_retry: service: pipelines: traces: receivers: [opencensus] exporters: [jaeger_grpc, debug] processors: [queued_retry] python-opentelemetry-1.39.1/docs/examples/opencensus-exporter-tracer/docker/docker-compose.yaml000066400000000000000000000007251511654350100331460ustar00rootroot00000000000000version: "2" services: # Collector collector: image: omnition/opentelemetry-collector-contrib:latest command: ["--config=/conf/collector-config.yaml", "--log-level=DEBUG"] volumes: - ./collector-config.yaml:/conf/collector-config.yaml ports: - "55678:55678" jaeger-all-in-one: image: jaegertracing/all-in-one:latest ports: - "16686:16686" - "6831:6831/udp" - "6832:6832/udp" - "14268" - "14250" python-opentelemetry-1.39.1/docs/examples/opencensus-shim/000077500000000000000000000000001511654350100237075ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/opencensus-shim/.gitignore000066400000000000000000000000131511654350100256710ustar00rootroot00000000000000example.db python-opentelemetry-1.39.1/docs/examples/opencensus-shim/README.rst000066400000000000000000000046201511654350100254000ustar00rootroot00000000000000OpenCensus Shim ================ This example shows how to use the :doc:`opentelemetry-opencensus-shim package <../../shim/opencensus_shim/opencensus_shim>` to interact with libraries instrumented with `opencensus-python `_. The source files required to run this example are available :scm_web:`here `. Installation ------------ Jaeger ****** Start Jaeger .. code-block:: sh docker run --rm \ -p 4317:4317 \ -p 4318:4318 \ -p 16686:16686 \ jaegertracing/all-in-one:latest \ --log-level=debug Python Dependencies ******************* Install the Python dependencies in :scm_raw_web:`requirements.txt ` .. code-block:: sh pip install -r requirements.txt Alternatively, you can install the Python dependencies separately: .. 
code-block:: sh pip install \ opentelemetry-api \ opentelemetry-sdk \ opentelemetry-exporter-otlp \ opentelemetry-opencensus-shim \ opentelemetry-instrumentation-sqlite3 \ opencensus \ opencensus-ext-flask \ Flask Run the Application ------------------- Start the application in a terminal. .. code-block:: sh flask --app app run -h 0.0.0.0 Point your browser to the address printed out (probably http://127.0.0.1:5000). Alternatively, just use curl to trigger a request: .. code-block:: sh curl http://127.0.0.1:5000 Jaeger UI ********* Open the Jaeger UI in your browser at `<http://localhost:16686>`_ and view traces for the "opencensus-shim-example-flask" service. Click on a span named "span" in the scatter plot. You will see a span tree with the following structure: * ``span`` * ``query movies from db`` * ``SELECT`` * ``build response html`` The root span comes from OpenCensus Flask instrumentation. The children ``query movies from db`` and ``build response html`` come from the manual instrumentation using OpenTelemetry's :meth:`opentelemetry.trace.Tracer.start_as_current_span`. Finally, the ``SELECT`` span is created by OpenTelemetry's SQLite3 instrumentation. Everything is exported to Jaeger using the OpenTelemetry exporter. Useful links ------------ - OpenTelemetry_ - :doc:`../../shim/opencensus_shim/opencensus_shim` .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ python-opentelemetry-1.39.1/docs/examples/opencensus-shim/app.py000066400000000000000000000055221511654350100250450ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
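# Overview of this example: Flask requests are traced by OpenCensus (via
# FlaskMiddleware), the shim bridges those spans into OpenTelemetry, and the
# manual OpenTelemetry spans plus the SQLite3 instrumentation below all land
# in the same trace, exported to Jaeger over OTLP.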
import sqlite3 from flask import Flask from opencensus.ext.flask.flask_middleware import FlaskMiddleware from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.instrumentation.sqlite3 import SQLite3Instrumentor from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor from opentelemetry.shim.opencensus import install_shim DB = "example.db" # Set up OpenTelemetry tracer_provider = TracerProvider( resource=Resource( { "service.name": "opencensus-shim-example-flask", } ) ) trace.set_tracer_provider(tracer_provider) # Configure OTel to export traces to Jaeger tracer_provider.add_span_processor( BatchSpanProcessor( OTLPSpanExporter( endpoint="localhost:4317", ) ) ) tracer = tracer_provider.get_tracer(__name__) # Install the shim to start bridging spans from OpenCensus to OpenTelemetry install_shim() # Instrument sqlite3 library SQLite3Instrumentor().instrument() # Setup Flask with OpenCensus instrumentation app = Flask(__name__) FlaskMiddleware(app) # Setup the application database def setup_db(): with sqlite3.connect(DB) as con: cur = con.cursor() cur.execute( """ CREATE TABLE IF NOT EXISTS movie( title, year, PRIMARY KEY(title, year) ) """ ) cur.execute( """ INSERT OR IGNORE INTO movie(title, year) VALUES ('Mission Telemetry', 2000), ('Observing the World', 2010), ('The Tracer', 1999), ('The Instrument', 2020) """ ) setup_db() @app.route("/") def hello_world(): lines = [] with tracer.start_as_current_span("query movies from db"), sqlite3.connect( DB ) as con: cur = con.cursor() for title, year in cur.execute("SELECT title, year from movie"): lines.append(f"

<li>{title} is from the year {year}</li>") with tracer.start_as_current_span("build response html"): html = f"<ul>{''.join(lines)}</ul>
    " return html python-opentelemetry-1.39.1/docs/examples/opencensus-shim/requirements.txt000066400000000000000000000002521511654350100271720ustar00rootroot00000000000000opentelemetry-api opentelemetry-sdk opentelemetry-exporter-otlp opentelemetry-opencensus-shim opentelemetry-instrumentation-sqlite3 opencensus opencensus-ext-flask Flask python-opentelemetry-1.39.1/docs/examples/opentracing/000077500000000000000000000000001511654350100231005ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/opentracing/README.rst000066400000000000000000000047461511654350100246020ustar00rootroot00000000000000OpenTracing Shim ================ This example shows how to use the :doc:`opentelemetry-opentracing-shim package <../../shim/opentracing_shim/opentracing_shim>` to interact with libraries instrumented with `opentracing-python `_. The included ``rediscache`` library creates spans via the OpenTracing Redis integration, `redis_opentracing `_. Spans are exported via the Jaeger exporter, which is attached to the OpenTelemetry tracer. The source files required to run this example are available :scm_web:`here `. Installation ------------ Jaeger ****** Start Jaeger .. code-block:: sh docker run --rm \ -p 4317:4317 \ -p 4318:4318 \ -p 16686:16686 \ jaegertracing/all-in-one:latest \ --log-level=debug Redis ***** Install Redis following the `instructions `_. Make sure that the Redis server is running by executing this: .. code-block:: sh redis-server Python Dependencies ******************* Install the Python dependencies in :scm_raw_web:`requirements.txt ` .. code-block:: sh pip install -r requirements.txt Alternatively, you can install the Python dependencies separately: .. code-block:: sh pip install \ opentelemetry-api \ opentelemetry-sdk \ opentelemetry-exporter-otlp \ opentelemetry-opentracing-shim \ redis \ redis_opentracing Run the Application ------------------- The example script calculates a few Fibonacci numbers and stores the results in Redis. The script, the ``rediscache`` library, and the OpenTracing Redis integration all contribute spans to the trace. To run the script: .. code-block:: sh python main.py After running, you can view the generated trace in the Jaeger UI. Jaeger UI ********* Open the Jaeger UI in your browser at ``_ and view traces for the "OpenTracing Shim Example" service. Each ``main.py`` run should generate a trace, and each trace should include multiple spans that represent calls to Redis. Note that tags and logs (OpenTracing) and attributes and events (OpenTelemetry) from both tracing systems appear in the exported trace. Useful links ------------ - OpenTelemetry_ - :doc:`../../shim/opentracing_shim/opentracing_shim` .. 
_OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ python-opentelemetry-1.39.1/docs/examples/opentracing/__init__.py000066400000000000000000000000001511654350100251770ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/opentracing/main.py000077500000000000000000000030521511654350100244010ustar00rootroot00000000000000#!/usr/bin/env python from rediscache import RedisCache from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor from opentelemetry.shim import opentracing_shim # Configure the tracer using the default implementation trace.set_tracer_provider(TracerProvider()) tracer_provider = trace.get_tracer_provider() # Create an OTLP gRPC span exporter otlp_exporter = OTLPSpanExporter( endpoint="http://localhost:4317", # For insecure connection, useful for testing insecure=True, ) # Add the exporter to the tracer provider trace.get_tracer_provider().add_span_processor( BatchSpanProcessor(otlp_exporter) ) # Create an OpenTracing shim. This implements the OpenTracing tracer API, but # forwards calls to the underlying OpenTelemetry tracer. opentracing_tracer = opentracing_shim.create_tracer(tracer_provider) # Our example caching library expects an OpenTracing-compliant tracer. redis_cache = RedisCache(opentracing_tracer) # Application code uses an OpenTelemetry Tracer as usual. tracer = trace.get_tracer(__name__) @redis_cache def fib(number): """Get the Nth Fibonacci number, cache intermediate results in Redis.""" if number < 0: raise ValueError if number in (0, 1): return number return fib(number - 1) + fib(number - 2) with tracer.start_as_current_span("Fibonacci") as span: span.set_attribute("is_example", "yes :)") fib(4) python-opentelemetry-1.39.1/docs/examples/opentracing/rediscache.py000066400000000000000000000041531511654350100255470ustar00rootroot00000000000000""" This is an example of a library written to work with opentracing-python. It provides a simple caching decorator backed by Redis, and uses the OpenTracing Redis integration to automatically generate spans for each call to Redis. """ import pickle from functools import wraps # FIXME The pylint disablings are needed here because the code of this # example is being executed against the tox.ini of the main # opentelemetry-python project. Find a way to separate the two. import redis # pylint: disable=import-error import redis_opentracing # pylint: disable=import-error class RedisCache: """Redis-backed caching decorator, using OpenTracing! Args: tracer: an opentracing.tracer.Tracer """ def __init__(self, tracer): redis_opentracing.init_tracing(tracer) self.tracer = tracer self.client = redis.StrictRedis() def __call__(self, func): @wraps(func) def inner(*args, **kwargs): with self.tracer.start_active_span("Caching decorator") as scope1: # Pickle the call args to get a canonical key. Don't do this in # prod! 
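# (unpickling cache contents executes arbitrary code if the cache can be
# tampered with, and pickle bytes are not guaranteed to be stable across
# Python versions)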
key = pickle.dumps((func.__qualname__, args, kwargs)) pval = self.client.get(key) if pval is not None: val = pickle.loads(pval) scope1.span.log_kv( {"msg": "Found cached value", "val": val} ) return val scope1.span.log_kv({"msg": "Cache miss, calling function"}) with self.tracer.start_active_span( f'Call "{func.__name__}"' ) as scope2: scope2.span.set_tag("func", func.__name__) scope2.span.set_tag("args", str(args)) scope2.span.set_tag("kwargs", str(kwargs)) val = func(*args, **kwargs) scope2.span.set_tag("val", str(val)) # Let keys expire after 10 seconds self.client.setex(key, 10, pickle.dumps(val)) return val return inner python-opentelemetry-1.39.1/docs/examples/opentracing/requirements.txt000066400000000000000000000001671511654350100263700ustar00rootroot00000000000000opentelemetry-api opentelemetry-sdk opentelemetry-exporter-otlp opentelemetry-opentracing-shim redis redis_opentracing python-opentelemetry-1.39.1/docs/examples/sqlcommenter/000077500000000000000000000000001511654350100233005ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/sqlcommenter/README.rst000066400000000000000000000114131511654350100247670ustar00rootroot00000000000000sqlcommenter ============ This is an example of how to use OpenTelemetry Python instrumentation with sqlcommenter to enrich database query statements with contextual information. For more information on sqlcommenter concepts, see: * `Semantic Conventions - Database Spans `_ * `sqlcommenter `_ The source files of this example are available `here `_. This example uses Docker to manage a database server and OpenTelemetry collector. Run MySQL server ---------------- A running MySQL server with general logs enabled will store query statements with context resulting from the sqlcommenter feature enabled in this example. .. code-block:: sh cd books_database docker build -t books-db . docker run -d --name books-db -p 3366:3306 books-db cd .. Check that the run is working and the general log is available: .. code-block:: sh docker exec -it books-db tail -f /var/log/general.log Run OpenTelemetry Collector --------------------------- Running the OpenTelemetry collector will show the MySQL instrumentor's comment-in-span-attribute feature, which this example has also enabled. .. code-block:: sh docker run \ -p 4317:4317 \ -v $(pwd)/collector-config.yaml:/etc/otel/config.yaml \ otel/opentelemetry-collector-contrib:latest Run the sqlcommenter example ---------------------------- Set up and activate a Python virtual environment. Install these dependencies of the sqlcommenter example: .. code-block:: sh pip install opentelemetry-sdk \ opentelemetry-exporter-otlp-proto-grpc \ opentelemetry-instrumentation-mysql \ mysql-connector-python Then, run this script, which instruments all mysql-connector calls with two sqlcommenter features opted in. .. code-block:: sh python instrumented_query.py Note that OpenTelemetry instrumentation with sqlcommenter is also available for other Python database client drivers/object relation mappers (ORMs). See full list at `instrumentation`_. .. _instrumentation: https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation Check MySQL server general log and spans for sqlcomment ------------------------------------------------------- After running the query script, check the MySQL general log contents: .. code-block:: sh docker exec -it books-db tail -f /var/log/general.log For each instrumented ``SELECT`` call, a query was made and logged with a sqlcomment appended. For example: ..
code:: 2025-09-02T18:49:06.981980Z 186 Query SELECT * FROM authors WHERE id = 1 /*db_driver='mysql.connector%%3A9.4.0',dbapi_level='2.0',dbapi_threadsafety=1,driver_paramstyle='pyformat',mysql_client_version='9.4.0',traceparent='00-2c45248f2beefdd9688b0a94eb4ac9ee-4f3af9a825aae9b1-01'*/ In the running OpenTelemetry collector, you'll also see one span per ``SELECT`` call. Each of those span's trace ID and span ID will correspond to a query log sqlcomment. With the comment-in-attribute feature enabled, the span's ``db.statement`` attribute will also contain the sqlcomment. For example: .. code:: ScopeSpans #0 ScopeSpans SchemaURL: https://opentelemetry.io/schemas/1.11.0 InstrumentationScope opentelemetry.instrumentation.mysql 0.57b0 Span #0 Trace ID : 2c45248f2beefdd9688b0a94eb4ac9ee Parent ID : ID : 4f3af9a825aae9b1 Name : SELECT Kind : Client Start time : 2025-09-02 18:49:06.982341 +0000 UTC End time : 2025-09-02 18:49:06.98463 +0000 UTC Status code : Unset Status message : Attributes: -> db.system: Str(mysql) -> db.name: Str(books) -> db.statement: Str(SELECT * FROM authors WHERE id = %s /*db_driver='mysql.connector%%3A9.4.0',dbapi_level='2.0',dbapi_threadsafety=1,driver_paramstyle='pyformat',mysql_client_version='9.4.0',traceparent='00-2c45248f2beefdd9688b0a94eb4ac9ee-4f3af9a825aae9b1-01'*/) -> db.user: Str(books) -> net.peer.name: Str(localhost) -> net.peer.port: Int(3366) References ---------- * `OpenTelemetry Project `_ * `OpenTelemetry Collector `_ * `OpenTelemetry MySQL instrumentation `_ * `Semantic Conventions - Database Spans `_ * `sqlcommenter `_python-opentelemetry-1.39.1/docs/examples/sqlcommenter/books_database/000077500000000000000000000000001511654350100262415ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/examples/sqlcommenter/books_database/Dockerfile000066400000000000000000000015711511654350100302370ustar00rootroot00000000000000FROM mysql:8.0 ENV MYSQL_ROOT_PASSWORD=root ENV MYSQL_DATABASE=books ADD books.sql /docker-entrypoint-initdb.d/ RUN echo "CREATE USER IF NOT EXISTS 'books'@'%' IDENTIFIED WITH mysql_native_password BY 'books123';" > /docker-entrypoint-initdb.d/01-create-user.sql && \ echo "GRANT ALL PRIVILEGES ON books.* TO 'books'@'%';" >> /docker-entrypoint-initdb.d/01-create-user.sql && \ echo "FLUSH PRIVILEGES;" >> /docker-entrypoint-initdb.d/01-create-user.sql # Prepare general logs RUN mkdir -p /var/log && \ touch /var/log/general.log && \ chown mysql:mysql /var/log/general.log EXPOSE 3306 HEALTHCHECK --interval=30s --timeout=5s --start-period=5s --retries=3 \ CMD mysqladmin ping -p${MYSQL_ROOT_PASSWORD} || exit 1 # Start MySQL with general logging enabled and compatible authentication CMD ["mysqld", "--general-log=1", "--general-log-file=/var/log/general.log"]python-opentelemetry-1.39.1/docs/examples/sqlcommenter/books_database/books.sql000066400000000000000000000044401511654350100301010ustar00rootroot00000000000000-- MySQL dump for Books Database -- Database: books_db -- Generated on: 2025-08-29 SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO"; START TRANSACTION; SET time_zone = "+00:00"; -- Database: `books` CREATE DATABASE IF NOT EXISTS `books` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; USE `books`; -- -------------------------------------------------------- -- Table structure for table `authors` DROP TABLE IF EXISTS `authors`; CREATE TABLE `authors` ( `id` int(11) NOT NULL AUTO_INCREMENT, `name` varchar(255) NOT NULL, `home_town` varchar(255) DEFAULT NULL, `birthdate` date DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT 
CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- Dumping data for table `authors` INSERT INTO `authors` (`id`, `name`, `home_town`, `birthdate`) VALUES (1, 'Frank Herbert', 'Tacoma, Washington', '1920-10-08'), (2, 'Isaac Asimov', 'Petrovichi, Russia', '1920-01-02'), (3, 'Terry Pratchett', 'Beaconsfield, England', '1948-04-28'); -- -------------------------------------------------------- -- Table structure for table `books` DROP TABLE IF EXISTS `books`; CREATE TABLE `books` ( `id` int(11) NOT NULL AUTO_INCREMENT, `title` varchar(255) NOT NULL, `author_id` int(11) NOT NULL, `year_published` int(4) DEFAULT NULL, `genre` varchar(100) DEFAULT NULL, PRIMARY KEY (`id`), KEY `fk_author` (`author_id`), CONSTRAINT `fk_author` FOREIGN KEY (`author_id`) REFERENCES `authors` (`id`) ON DELETE CASCADE ON UPDATE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- Dumping data for table `books` INSERT INTO `books` (`id`, `title`, `author_id`, `year_published`, `genre`) VALUES (1, 'Dune', 1, 1965, 'Science Fiction'), (2, 'Foundation', 2, 1951, 'Science Fiction'), (3, 'The Colour of Magic', 3, 1983, 'Fantasy Comedy'); -- -------------------------------------------------------- -- Additional books to show the many-to-one relationship INSERT INTO `books` (`id`, `title`, `author_id`, `year_published`, `genre`) VALUES (4, 'Dune Messiah', 1, 1969, 'Science Fiction'), (5, 'I, Robot', 2, 1950, 'Science Fiction'), (6, 'Good Omens', 3, 1990, 'Fantasy Comedy'); -- -------------------------------------------------------- -- Auto increment values ALTER TABLE `authors` AUTO_INCREMENT = 4; ALTER TABLE `books` AUTO_INCREMENT = 7; COMMIT;python-opentelemetry-1.39.1/docs/examples/sqlcommenter/collector-config.yaml000066400000000000000000000003431511654350100274150ustar00rootroot00000000000000receivers: otlp: protocols: grpc: endpoint: 0.0.0.0:4317 exporters: debug: verbosity: detailed service: pipelines: traces: receivers: [otlp] processors: [] exporters: [debug] python-opentelemetry-1.39.1/docs/examples/sqlcommenter/instrumented_query.py000066400000000000000000000040101511654350100276130ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
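# What to expect from this example: each query below is exported as a span,
# and a sqlcomment carrying the span's trace context is appended to the SQL
# statement MySQL receives; because the attribute commenter is enabled, the
# sqlcomment also appears in the span's db.statement attribute.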
from mysql.connector import connect from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.instrumentation.mysql import MySQLInstrumentor from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor resource = Resource.create( attributes={ "service.name": "sqlcommenter-example", } ) trace.set_tracer_provider(TracerProvider(resource=resource)) span_processor = BatchSpanProcessor( OTLPSpanExporter(endpoint="http://localhost:4317") ) trace.get_tracer_provider().add_span_processor(span_processor) cnx = connect( host="localhost", port=3366, user="books", password="books123", database="books", ) # Instruments MySQL queries with sqlcommenter enabled # and comment-in-span-attribute enabled. # Returns wrapped connection to generate traces. cnx = MySQLInstrumentor().instrument_connection( connection=cnx, enable_commenter=True, enable_attribute_commenter=True, ) cursor = cnx.cursor() statement = "SELECT * FROM authors WHERE id = %s" # Each SELECT query generates one mysql log with sqlcomment # and one OTel span with `db.statement` attribute that also # includes sqlcomment. for cid in range(1, 4): cursor.execute(statement, (cid,)) rows = cursor.fetchall() print(f"Found author: {rows[0]}") print("Done.") python-opentelemetry-1.39.1/docs/exporter/000077500000000000000000000000001511654350100206215ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/exporter/index.rst000066400000000000000000000001201511654350100224530ustar00rootroot00000000000000:orphan: Exporters ========= .. toctree:: :maxdepth: 1 :glob: ** python-opentelemetry-1.39.1/docs/exporter/opencensus/000077500000000000000000000000001511654350100230035ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/exporter/opencensus/opencensus.rst000066400000000000000000000002241511654350100257150ustar00rootroot00000000000000OpenCensus Exporter =================== .. automodule:: opentelemetry.exporter.opencensus :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/exporter/otlp/000077500000000000000000000000001511654350100215775ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/exporter/otlp/otlp.rst000066400000000000000000000017131511654350100233110ustar00rootroot00000000000000OpenTelemetry OTLP Exporters ============================ .. automodule:: opentelemetry.exporter.otlp :members: :undoc-members: :show-inheritance: opentelemetry.exporter.otlp.proto.http --------------------------------------- .. automodule:: opentelemetry.exporter.otlp.proto.http :members: :undoc-members: :show-inheritance: .. automodule:: opentelemetry.exporter.otlp.proto.http.trace_exporter .. automodule:: opentelemetry.exporter.otlp.proto.http.metric_exporter .. automodule:: opentelemetry.exporter.otlp.proto.http._log_exporter opentelemetry.exporter.otlp.proto.grpc --------------------------------------- .. automodule:: opentelemetry.exporter.otlp.proto.grpc :members: :undoc-members: :show-inheritance: .. automodule:: opentelemetry.exporter.otlp.proto.grpc.trace_exporter .. automodule:: opentelemetry.exporter.otlp.proto.grpc.metric_exporter .. 
automodule:: opentelemetry.exporter.otlp.proto.grpc._log_exporter python-opentelemetry-1.39.1/docs/exporter/prometheus/000077500000000000000000000000001511654350100230145ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/exporter/prometheus/prometheus.rst000066400000000000000000000034361511654350100257470ustar00rootroot00000000000000OpenTelemetry Prometheus Exporter ================================= .. automodule:: opentelemetry.exporter.prometheus :members: :undoc-members: :show-inheritance: Installation ------------ The OpenTelemetry Prometheus Exporter package is available on PyPI:: pip install opentelemetry-exporter-prometheus Usage ----- The Prometheus exporter starts an HTTP server that collects metrics and serializes them to Prometheus text format on request:: from prometheus_client import start_http_server from opentelemetry import metrics from opentelemetry.exporter.prometheus import PrometheusMetricReader from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.resources import SERVICE_NAME, Resource # Service name is required for most backends resource = Resource(attributes={ SERVICE_NAME: "your-service-name" }) # Start Prometheus client start_http_server(port=9464, addr="localhost") # Initialize PrometheusMetricReader which pulls metrics from the SDK # on-demand to respond to scrape requests reader = PrometheusMetricReader() provider = MeterProvider(resource=resource, metric_readers=[reader]) metrics.set_meter_provider(provider) Configuration ------------- The following environment variables are supported: * ``OTEL_EXPORTER_PROMETHEUS_HOST`` (default: "localhost"): The host to bind to * ``OTEL_EXPORTER_PROMETHEUS_PORT`` (default: 9464): The port to bind to Limitations ----------- * No multiprocessing support: The Prometheus exporter is not designed to operate in multiprocessing environments (see `#3747 `_). References ---------- * `Prometheus `_ * `OpenTelemetry Project `_ python-opentelemetry-1.39.1/docs/exporter/zipkin/000077500000000000000000000000001511654350100221255ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/exporter/zipkin/zipkin.rst000066400000000000000000000006061511654350100241650ustar00rootroot00000000000000OpenTelemetry Zipkin Exporters ============================== .. automodule:: opentelemetry.exporter.zipkin :members: :undoc-members: :show-inheritance: .. automodule:: opentelemetry.exporter.zipkin.json :members: :undoc-members: :show-inheritance: .. automodule:: opentelemetry.exporter.zipkin.proto.http :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/getting_started/000077500000000000000000000000001511654350100221405ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/getting_started/__init__.py000066400000000000000000000000001511654350100242370ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/getting_started/flask_example.py000066400000000000000000000026161511654350100253320ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # flask_example.py import flask import requests from opentelemetry import trace from opentelemetry.instrumentation.flask import FlaskInstrumentor from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ConsoleSpanExporter, ) trace.set_tracer_provider(TracerProvider()) trace.get_tracer_provider().add_span_processor( BatchSpanProcessor(ConsoleSpanExporter()) ) app = flask.Flask(__name__) FlaskInstrumentor().instrument_app(app) RequestsInstrumentor().instrument() tracer = trace.get_tracer(__name__) @app.route("/") def hello(): with tracer.start_as_current_span("example-request"): requests.get("http://www.example.com", timeout=10) return "hello" app.run(port=5000) python-opentelemetry-1.39.1/docs/getting_started/metrics_example.py000066400000000000000000000044071511654350100257000ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # metrics.py # This is still work in progress as the metrics SDK is being implemented from typing import Iterable from opentelemetry.metrics import ( CallbackOptions, Observation, get_meter_provider, set_meter_provider, ) from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) exporter = ConsoleMetricExporter() reader = PeriodicExportingMetricReader(exporter) provider = MeterProvider(metric_readers=[reader]) set_meter_provider(provider) def observable_counter_func(options: CallbackOptions) -> Iterable[Observation]: yield Observation(1, {}) def observable_up_down_counter_func( options: CallbackOptions, ) -> Iterable[Observation]: yield Observation(-10, {}) def observable_gauge_func(options: CallbackOptions) -> Iterable[Observation]: yield Observation(9, {}) meter = get_meter_provider().get_meter("getting-started", "0.1.2") # Counter counter = meter.create_counter("counter") counter.add(1) # Async Counter observable_counter = meter.create_observable_counter( "observable_counter", [observable_counter_func] ) # UpDownCounter updown_counter = meter.create_up_down_counter("updown_counter") updown_counter.add(1) updown_counter.add(-5) # Async UpDownCounter observable_updown_counter = meter.create_observable_up_down_counter( "observable_updown_counter", [observable_up_down_counter_func] ) # Histogram histogram = meter.create_histogram("histogram") histogram.record(99.9) # Async Gauge observable_gauge = meter.create_observable_gauge( "observable_gauge", [observable_gauge_func] ) # Sync Gauge gauge = meter.create_gauge("gauge") gauge.set(1) python-opentelemetry-1.39.1/docs/getting_started/otlpcollector_example.py000066400000000000000000000025501511654350100271140ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you 
may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # otcollector.py from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor span_exporter = OTLPSpanExporter( # optional # endpoint="myCollectorURL:4317", # credentials=ChannelCredentials(credentials), # headers=(("metadata", "metadata")), ) tracer_provider = TracerProvider() trace.set_tracer_provider(tracer_provider) span_processor = BatchSpanProcessor(span_exporter) tracer_provider.add_span_processor(span_processor) # Configure the tracer to use the collector exporter tracer = trace.get_tracer_provider().get_tracer(__name__) with tracer.start_as_current_span("foo"): print("Hello world!") python-opentelemetry-1.39.1/docs/getting_started/tests/000077500000000000000000000000001511654350100233025ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/getting_started/tests/__init__.py000066400000000000000000000011101511654350100254040ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python-opentelemetry-1.39.1/docs/getting_started/tests/requirements.txt000066400000000000000000000011151511654350100265640ustar00rootroot00000000000000asgiref==3.7.2 attrs==23.1.0 certifi==2024.7.4 charset-normalizer==2.0.12 click==8.1.7 Flask==2.3.3 idna==3.7 importlib-metadata==6.8.0 iniconfig==2.0.0 itsdangerous==2.1.2 Jinja2==3.1.5 MarkupSafe==2.1.3 packaging==24.0 pluggy==1.3.0 py-cpuinfo==9.0.0 pytest==7.4.4 requests==2.32.3 tomli==2.0.1 typing_extensions==4.8.0 urllib3==1.26.19 Werkzeug==3.0.6 wrapt==1.15.0 zipp==3.19.2 -e opentelemetry-semantic-conventions -e opentelemetry-proto -e exporter/opentelemetry-exporter-otlp-proto-common -e exporter/opentelemetry-exporter-otlp-proto-grpc -e opentelemetry-api -e opentelemetry-sdk python-opentelemetry-1.39.1/docs/getting_started/tests/test_flask.py000066400000000000000000000033231511654350100260140ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import sys import unittest from time import sleep import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import ( # pylint: disable=import-error Retry, ) class TestFlask(unittest.TestCase): def test_flask(self): dirpath = os.path.dirname(os.path.realpath(__file__)) server_script = f"{dirpath}/../flask_example.py" server = subprocess.Popen( # pylint: disable=consider-using-with [sys.executable, server_script], stdout=subprocess.PIPE, ) retry_strategy = Retry(total=10, backoff_factor=1) adapter = HTTPAdapter(max_retries=retry_strategy) http = requests.Session() http.mount("http://", adapter) try: result = http.get("http://localhost:5000") self.assertEqual(result.status_code, 200) sleep(5) finally: server.terminate() output = str(server.stdout.read()) self.assertIn('"name": "GET"', output) self.assertIn('"name": "example-request"', output) self.assertIn('"name": "GET /"', output) python-opentelemetry-1.39.1/docs/getting_started/tests/test_metrics.py000066400000000000000000000070041511654350100263620ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
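# This test runs metrics_example.py in a subprocess and asserts on the
# JSON document that the ConsoleMetricExporter prints to stdout.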
import json import os import subprocess import sys import unittest class TestMetrics(unittest.TestCase): def test_metrics(self): """Test that metrics example produces expected values""" # Run the metrics example test_script = f"{os.path.dirname(os.path.realpath(__file__))}/../metrics_example.py" result = subprocess.run( [sys.executable, test_script], capture_output=True, text=True, timeout=10, check=True, ) # Script should run successfully self.assertEqual(result.returncode, 0) # Parse the JSON output output_data = json.loads(result.stdout) # Get the metrics from the JSON structure metrics = output_data["resource_metrics"][0]["scope_metrics"][0][ "metrics" ] # Create a lookup dict for easier testing metrics_by_name = {metric["name"]: metric for metric in metrics} # Test Counter: should be 1 (called counter.add(1)) counter_value = metrics_by_name["counter"]["data"]["data_points"][0][ "value" ] self.assertEqual(counter_value, 1, "Counter should have value 1") # Test UpDownCounter: should be -4 (1 + (-5) = -4) updown_value = metrics_by_name["updown_counter"]["data"][ "data_points" ][0]["value"] self.assertEqual( updown_value, -4, "UpDownCounter should have value -4" ) # Test Histogram: should have count=1, sum=99.9 histogram_data = metrics_by_name["histogram"]["data"]["data_points"][0] self.assertEqual( histogram_data["count"], 1, "Histogram should have count 1" ) self.assertEqual( histogram_data["sum"], 99.9, "Histogram should have sum 99.9" ) # Test Gauge: should be 1 (last value set) gauge_value = metrics_by_name["gauge"]["data"]["data_points"][0][ "value" ] self.assertEqual(gauge_value, 1, "Gauge should have value 1") # Test Observable Counter: should be 1 (from callback) obs_counter_value = metrics_by_name["observable_counter"]["data"][ "data_points" ][0]["value"] self.assertEqual( obs_counter_value, 1, "Observable counter should have value 1" ) # Test Observable UpDownCounter: should be -10 (from callback) obs_updown_value = metrics_by_name["observable_updown_counter"][ "data" ]["data_points"][0]["value"] self.assertEqual( obs_updown_value, -10, "Observable updown counter should have value -10", ) # Test Observable Gauge: should be 9 (from callback) obs_gauge_value = metrics_by_name["observable_gauge"]["data"][ "data_points" ][0]["value"] self.assertEqual( obs_gauge_value, 9, "Observable gauge should have value 9" ) python-opentelemetry-1.39.1/docs/getting_started/tests/test_otlpcollector.py000066400000000000000000000030671511654350100276060ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
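# otlpcollector_example.py retries the OTLP export indefinitely when no
# collector is listening, so the test below kills the subprocess after a
# short timeout instead of waiting for it to exit on its own.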
# otlpcollector_example.py
import os
import subprocess
import sys
import unittest


class TestOTLPCollector(unittest.TestCase):
    def test_otlpcollector(self):
        """Test that OTLP collector example outputs 'Hello world!'"""
        dirpath = os.path.dirname(os.path.realpath(__file__))
        test_script = f"{dirpath}/../otlpcollector_example.py"

        # Run the script with a short timeout since it will retry forever
        with subprocess.Popen(
            [sys.executable, test_script],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        ) as process:
            # Wait 2 seconds then kill it (enough time to print "Hello world!")
            try:
                stdout, _ = process.communicate(timeout=2)
            except subprocess.TimeoutExpired:
                process.kill()
                stdout, _ = process.communicate()

        # Check that it printed the expected message
        self.assertIn("Hello world!", stdout)
python-opentelemetry-1.39.1/docs/getting_started/tests/test_tracing.py000066400000000000000000000020771511654350100263470ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import unittest


class TestBasicTracerExample(unittest.TestCase):
    def test_basic_tracer(self):
        dirpath = os.path.dirname(os.path.realpath(__file__))
        test_script = f"{dirpath}/../tracing_example.py"
        output = subprocess.check_output(
            (sys.executable, test_script)
        ).decode()

        self.assertIn('"name": "foo"', output)
        self.assertIn('"name": "bar"', output)
        self.assertIn('"name": "baz"', output)
python-opentelemetry-1.39.1/docs/getting_started/tracing_example.py000066400000000000000000000022241511654350100256540ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# tracing.py
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
    BatchSpanProcessor,
    ConsoleSpanExporter,
)

provider = TracerProvider()
processor = BatchSpanProcessor(ConsoleSpanExporter())
provider.add_span_processor(processor)
trace.set_tracer_provider(provider)

tracer = trace.get_tracer(__name__)

with tracer.start_as_current_span("foo"):
    with tracer.start_as_current_span("bar"):
        with tracer.start_as_current_span("baz"):
            print("Hello world from OpenTelemetry Python!")
python-opentelemetry-1.39.1/docs/index.rst000066400000000000000000000023571511654350100206150ustar00rootroot00000000000000OpenTelemetry-Python API Reference
==================================

..
image:: https://img.shields.io/badge/slack-chat-green.svg :target: https://cloud-native.slack.com/archives/C01PD4HUVBL :alt: Slack Chat Welcome to the docs for the `Python OpenTelemetry implementation `_. For an introduction to OpenTelemetry, see the `OpenTelemetry website docs `_. To learn how to instrument your Python code, see `Getting Started `_. For project status, information about releases, installation instructions and more, see `Python `_. Getting Started --------------- * `Getting Started `_ * `Frequently Asked Questions and Cookbook `_ .. toctree:: :maxdepth: 1 :caption: Core Packages :name: packages api/index sdk/index .. toctree:: :maxdepth: 2 :caption: More :glob: exporter/index shim/index examples/index * :ref:`genindex` * :ref:`modindex` * :ref:`search` python-opentelemetry-1.39.1/docs/make.bat000066400000000000000000000014231511654350100203560ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=. set BUILDDIR=_build if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% :end popd python-opentelemetry-1.39.1/docs/sdk/000077500000000000000000000000001511654350100175325ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/sdk/_logs.rst000066400000000000000000000002421511654350100213650ustar00rootroot00000000000000opentelemetry.sdk._logs package =============================== .. automodule:: opentelemetry.sdk._logs :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/sdk/environment_variables.rst000066400000000000000000000003721511654350100246620ustar00rootroot00000000000000opentelemetry.sdk.environment_variables ======================================= .. TODO: what is the SDK .. toctree:: :maxdepth: 1 .. automodule:: opentelemetry.sdk.environment_variables :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/sdk/error_handler.rst000066400000000000000000000002721511654350100231130ustar00rootroot00000000000000opentelemetry.sdk.error_handler package ======================================= .. automodule:: opentelemetry.sdk.error_handler :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/sdk/index.rst000066400000000000000000000003061511654350100213720ustar00rootroot00000000000000OpenTelemetry Python SDK ======================== .. TODO: what is the SDK .. toctree:: :maxdepth: 1 _logs resources trace metrics error_handler environment_variables python-opentelemetry-1.39.1/docs/sdk/metrics.export.rst000066400000000000000000000002551511654350100232540ustar00rootroot00000000000000opentelemetry.sdk.metrics.export ================================ .. automodule:: opentelemetry.sdk.metrics.export :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/sdk/metrics.rst000066400000000000000000000003611511654350100217320ustar00rootroot00000000000000opentelemetry.sdk.metrics package ================================== Submodules ---------- .. 
toctree:: metrics.export metrics.view .. automodule:: opentelemetry.sdk.metrics :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/sdk/metrics.view.rst000066400000000000000000000002471511654350100227060ustar00rootroot00000000000000opentelemetry.sdk.metrics.view ============================== .. automodule:: opentelemetry.sdk.metrics.view :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/sdk/resources.rst000066400000000000000000000002651511654350100223010ustar00rootroot00000000000000opentelemetry.sdk.resources package ========================================== .. automodule:: opentelemetry.sdk.resources :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/sdk/trace.export.rst000066400000000000000000000002621511654350100227020ustar00rootroot00000000000000opentelemetry.sdk.trace.export ========================================== .. automodule:: opentelemetry.sdk.trace.export :members: :undoc-members: :show-inheritance:python-opentelemetry-1.39.1/docs/sdk/trace.id_generator.rst000066400000000000000000000002711511654350100240230ustar00rootroot00000000000000opentelemetry.sdk.trace.id_generator ==================================== .. automodule:: opentelemetry.sdk.trace.id_generator :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/sdk/trace.rst000066400000000000000000000004301511654350100213570ustar00rootroot00000000000000opentelemetry.sdk.trace package =============================== Submodules ---------- .. toctree:: trace.export trace.id_generator trace.sampling util.instrumentation .. automodule:: opentelemetry.sdk.trace :members: :undoc-members: :show-inheritance: python-opentelemetry-1.39.1/docs/sdk/trace.sampling.rst000066400000000000000000000002661511654350100231770ustar00rootroot00000000000000opentelemetry.sdk.trace.sampling ========================================== .. automodule:: opentelemetry.sdk.trace.sampling :members: :undoc-members: :show-inheritance:python-opentelemetry-1.39.1/docs/sdk/util.instrumentation.rst000066400000000000000000000002121511654350100244760ustar00rootroot00000000000000opentelemetry.sdk.util.instrumentation ========================================== .. automodule:: opentelemetry.sdk.util.instrumentation python-opentelemetry-1.39.1/docs/shim/000077500000000000000000000000001511654350100177115ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/shim/index.rst000066400000000000000000000001101511654350100215420ustar00rootroot00000000000000:orphan: Shims ===== .. toctree:: :maxdepth: 1 :glob: ** python-opentelemetry-1.39.1/docs/shim/opencensus_shim/000077500000000000000000000000001511654350100231135ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/shim/opencensus_shim/opencensus_shim.rst000066400000000000000000000002151511654350100270450ustar00rootroot00000000000000OpenCensus Shim for OpenTelemetry ================================== .. automodule:: opentelemetry.shim.opencensus :no-show-inheritance: python-opentelemetry-1.39.1/docs/shim/opentracing_shim/000077500000000000000000000000001511654350100232425ustar00rootroot00000000000000python-opentelemetry-1.39.1/docs/shim/opentracing_shim/opentracing_shim.rst000066400000000000000000000002241511654350100273230ustar00rootroot00000000000000OpenTracing Shim for OpenTelemetry ================================== .. 
automodule:: opentelemetry.shim.opentracing_shim :no-show-inheritance: python-opentelemetry-1.39.1/eachdist.ini000066400000000000000000000021461511654350100203110ustar00rootroot00000000000000# These will be sorted first in that order. # All packages that are depended upon by others should be listed here. [DEFAULT] sortfirst= opentelemetry-api opentelemetry-sdk opentelemetry-proto opentelemetry-distro tests/opentelemetry-test-utils exporter/* [stable] version=1.39.1 packages= opentelemetry-sdk opentelemetry-proto opentelemetry-propagator-jaeger opentelemetry-propagator-b3 opentelemetry-exporter-zipkin-proto-http opentelemetry-exporter-zipkin-json opentelemetry-exporter-zipkin opentelemetry-exporter-otlp-proto-grpc opentelemetry-exporter-otlp-proto-http opentelemetry-exporter-otlp opentelemetry-api [prerelease] version=0.60b1 packages= opentelemetry-opentracing-shim opentelemetry-opencensus-shim opentelemetry-exporter-opencensus opentelemetry-exporter-prometheus opentelemetry-distro opentelemetry-semantic-conventions opentelemetry-test-utils tests [lintroots] extraroots=examples/*,scripts/ subglob=*.py,tests/,test/,src/*,examples/* [testroots] extraroots=examples/*,tests/ subglob=tests/,test/ python-opentelemetry-1.39.1/exporter/000077500000000000000000000000001511654350100176715ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/000077500000000000000000000000001511654350100266135ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/LICENSE000066400000000000000000000261351511654350100276270ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/README.rst000066400000000000000000000011541511654350100303030ustar00rootroot00000000000000OpenCensus Exporter
===================

|pypi|

.. |pypi| image:: https://badge.fury.io/py/opentelemetry-exporter-opencensus.svg
   :target: https://pypi.org/project/opentelemetry-exporter-opencensus/

This library allows exporting traces using OpenCensus.
Installation ------------ :: pip install opentelemetry-exporter-opencensus References ---------- * `OpenCensus Exporter `_ * `OpenTelemetry Collector `_ * `OpenTelemetry `_ python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/pyproject.toml000066400000000000000000000032531511654350100315320ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-exporter-opencensus" dynamic = ["version"] description = "OpenCensus Exporter" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 4 - Beta", "Framework :: OpenTelemetry", "Framework :: OpenTelemetry :: Exporters", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "grpcio >= 1.63.2, < 2.0.0; python_version < '3.13'", "grpcio >= 1.66.2, < 2.0.0; python_version >= '3.13'", "opencensus-proto >= 0.1.0, < 1.0.0", "opentelemetry-api >= 1.39.1", "opentelemetry-sdk >= 1.15", "protobuf ~= 3.13", "setuptools >= 16.0", ] [project.entry-points.opentelemetry_traces_exporter] opencensus = "opentelemetry.exporter.opencensus.trace_exporter:OpenCensusSpanExporter" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-opencensus" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/exporter/opencensus/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/000077500000000000000000000000001511654350100274025ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/000077500000000000000000000000001511654350100322765ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/000077500000000000000000000000001511654350100341465ustar00rootroot00000000000000opencensus/000077500000000000000000000000001511654350100362515ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter__init__.py000066400000000000000000000012271511654350100403640ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The **OpenCensus Exporter** allows to export traces using OpenCensus. 
""" py.typed000066400000000000000000000000001511654350100377360ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensustrace_exporter/000077500000000000000000000000001511654350100412775ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus__init__.py000066400000000000000000000152221511654350100434120ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/trace_exporter# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """OpenCensus Span Exporter.""" import logging from typing import Sequence import grpc from opencensus.proto.agent.trace.v1 import ( trace_service_pb2, trace_service_pb2_grpc, ) from opencensus.proto.trace.v1 import trace_pb2 import opentelemetry.exporter.opencensus.util as utils from opentelemetry import trace from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult DEFAULT_ENDPOINT = "localhost:55678" logger = logging.getLogger(__name__) # pylint: disable=no-member class OpenCensusSpanExporter(SpanExporter): """OpenCensus Collector span exporter. Args: endpoint: OpenCensus Collector receiver endpoint. host_name: Host name. client: TraceService client stub. 
""" def __init__( self, endpoint=DEFAULT_ENDPOINT, host_name=None, client=None, ): tracer_provider = trace.get_tracer_provider() service_name = ( tracer_provider.resource.attributes[SERVICE_NAME] if getattr(tracer_provider, "resource", None) else Resource.create().attributes.get(SERVICE_NAME) ) self.endpoint = endpoint if client is None: self.channel = grpc.insecure_channel(self.endpoint) self.client = trace_service_pb2_grpc.TraceServiceStub( channel=self.channel ) else: self.client = client self.host_name = host_name self.node = utils.get_node(service_name, host_name) def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: # Populate service_name from first span # We restrict any SpanProcessor to be only associated with a single # TracerProvider, so it is safe to assume that all Spans in a single # batch all originate from one TracerProvider (and in turn have all # the same service_name) if spans: service_name = spans[0].resource.attributes.get(SERVICE_NAME) if service_name: self.node = utils.get_node(service_name, self.host_name) try: responses = self.client.Export(self.generate_span_requests(spans)) # Read response for _ in responses: pass except grpc.RpcError: return SpanExportResult.FAILURE return SpanExportResult.SUCCESS def shutdown(self) -> None: pass def generate_span_requests(self, spans): collector_spans = translate_to_collector(spans) service_request = trace_service_pb2.ExportTraceServiceRequest( node=self.node, spans=collector_spans ) yield service_request def force_flush(self, timeout_millis: int = 30000) -> bool: return True # pylint: disable=too-many-branches def translate_to_collector(spans: Sequence[ReadableSpan]): collector_spans = [] for span in spans: status = None if span.status is not None: status = trace_pb2.Status( code=span.status.status_code.value, message=span.status.description, ) collector_span = trace_pb2.Span( name=trace_pb2.TruncatableString(value=span.name), kind=utils.get_collector_span_kind(span.kind), trace_id=span.context.trace_id.to_bytes(16, "big"), span_id=span.context.span_id.to_bytes(8, "big"), start_time=utils.proto_timestamp_from_time_ns(span.start_time), end_time=utils.proto_timestamp_from_time_ns(span.end_time), status=status, ) parent_id = 0 if span.parent is not None: parent_id = span.parent.span_id collector_span.parent_span_id = parent_id.to_bytes(8, "big") if span.context.trace_state is not None: for key, value in span.context.trace_state.items(): collector_span.tracestate.entries.add(key=key, value=value) if span.attributes: for key, value in span.attributes.items(): utils.add_proto_attribute_value( collector_span.attributes, key, value ) if span.events: for event in span.events: collector_annotation = trace_pb2.Span.TimeEvent.Annotation( description=trace_pb2.TruncatableString(value=event.name) ) if event.attributes: for key, value in event.attributes.items(): utils.add_proto_attribute_value( collector_annotation.attributes, key, value ) collector_span.time_events.time_event.add( time=utils.proto_timestamp_from_time_ns(event.timestamp), annotation=collector_annotation, ) if span.links: for link in span.links: collector_span_link = collector_span.links.link.add() collector_span_link.trace_id = link.context.trace_id.to_bytes( 16, "big" ) collector_span_link.span_id = link.context.span_id.to_bytes( 8, "big" ) collector_span_link.type = ( trace_pb2.Span.Link.Type.TYPE_UNSPECIFIED ) if span.parent is not None: if ( link.context.span_id == span.parent.span_id and link.context.trace_id == span.parent.trace_id ): 
collector_span_link.type = ( trace_pb2.Span.Link.Type.PARENT_LINKED_SPAN ) if link.attributes: for key, value in link.attributes.items(): utils.add_proto_attribute_value( collector_span_link.attributes, key, value ) collector_spans.append(collector_span) return collector_spans util.py000066400000000000000000000064671511654350100376150ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from os import getpid from socket import gethostname from time import time # pylint: disable=wrong-import-position from google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module Timestamp, ) from opencensus.proto.agent.common.v1 import common_pb2 from opencensus.proto.trace.v1 import trace_pb2 from opentelemetry.exporter.opencensus.version import ( __version__ as opencensusexporter_exporter_version, ) from opentelemetry.trace import SpanKind from opentelemetry.util._importlib_metadata import version OPENTELEMETRY_VERSION = version("opentelemetry-api") def proto_timestamp_from_time_ns(time_ns): """Converts datetime to protobuf timestamp. Args: time_ns: Time in nanoseconds Returns: Returns protobuf timestamp. """ ts = Timestamp() if time_ns is not None: # pylint: disable=no-member ts.FromNanoseconds(time_ns) return ts # pylint: disable=no-member def get_collector_span_kind(kind: SpanKind): if kind is SpanKind.SERVER: return trace_pb2.Span.SpanKind.SERVER if kind is SpanKind.CLIENT: return trace_pb2.Span.SpanKind.CLIENT return trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED def add_proto_attribute_value(pb_attributes, key, value): """Sets string, int, boolean or float value on protobuf span, link or annotation attributes. Args: pb_attributes: protobuf Span's attributes property. key: attribute key to set. value: attribute value """ if isinstance(value, bool): pb_attributes.attribute_map[key].bool_value = value elif isinstance(value, int): pb_attributes.attribute_map[key].int_value = value elif isinstance(value, str): pb_attributes.attribute_map[key].string_value.value = value elif isinstance(value, float): pb_attributes.attribute_map[key].double_value = value else: pb_attributes.attribute_map[key].string_value.value = str(value) # pylint: disable=no-member def get_node(service_name, host_name): """Generates Node message from params and system information. Args: service_name: Name of Collector service. host_name: Host name. 
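
    Returns:
        An OpenCensus proto ``Node`` message identifying this process,
        this library, and the given service.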
""" return common_pb2.Node( identifier=common_pb2.ProcessIdentifier( host_name=gethostname() if host_name is None else host_name, pid=getpid(), start_timestamp=proto_timestamp_from_time_ns(int(time() * 1e9)), ), library_info=common_pb2.LibraryInfo( language=common_pb2.LibraryInfo.Language.Value("PYTHON"), exporter_version=opencensusexporter_exporter_version, core_library_version=OPENTELEMETRY_VERSION, ), service_info=common_pb2.ServiceInfo(name=service_name), ) version/000077500000000000000000000000001511654350100377365ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus__init__.py000066400000000000000000000011401511654350100420430ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.60b1" python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/test-requirements.txt000066400000000000000000000006231511654350100330550ustar00rootroot00000000000000asgiref==3.7.2 grpcio==1.66.2 importlib-metadata==6.11.0 iniconfig==2.0.0 opencensus-proto==0.1.0 packaging==24.0 pluggy==1.5.0 protobuf==3.20.3 py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e opentelemetry-sdk -e tests/opentelemetry-test-utils -e opentelemetry-semantic-conventions -e exporter/opentelemetry-exporter-opencensus python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/tests/000077500000000000000000000000001511654350100277555ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/tests/__init__.py000066400000000000000000000000001511654350100320540ustar00rootroot00000000000000test_otcollector_trace_exporter.py000066400000000000000000000317441511654350100367570ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-opencensus/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import unittest from unittest import mock import grpc from google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module Timestamp, ) from opencensus.proto.trace.v1 import trace_pb2 import opentelemetry.exporter.opencensus.util as utils from opentelemetry import trace as trace_api from opentelemetry.exporter.opencensus.trace_exporter import ( OpenCensusSpanExporter, translate_to_collector, ) from opentelemetry.sdk import trace from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SpanExportResult from opentelemetry.test.globals_test import TraceGlobalsTest from opentelemetry.trace import TraceFlags # pylint: disable=no-member class TestCollectorSpanExporter(TraceGlobalsTest, unittest.TestCase): def test_constructor(self): mock_get_node = mock.Mock() patch = mock.patch( "opentelemetry.exporter.opencensus.util.get_node", side_effect=mock_get_node, ) trace_api.set_tracer_provider( TracerProvider( resource=Resource.create({SERVICE_NAME: "testServiceName"}) ) ) host_name = "testHostName" client = grpc.insecure_channel("") endpoint = "testEndpoint" with patch: exporter = OpenCensusSpanExporter( host_name=host_name, endpoint=endpoint, client=client, ) self.assertIs(exporter.client, client) self.assertEqual(exporter.endpoint, endpoint) mock_get_node.assert_called_with("testServiceName", host_name) def test_get_collector_span_kind(self): result = utils.get_collector_span_kind(trace_api.SpanKind.SERVER) self.assertIs(result, trace_pb2.Span.SpanKind.SERVER) result = utils.get_collector_span_kind(trace_api.SpanKind.CLIENT) self.assertIs(result, trace_pb2.Span.SpanKind.CLIENT) result = utils.get_collector_span_kind(trace_api.SpanKind.CONSUMER) self.assertIs(result, trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED) result = utils.get_collector_span_kind(trace_api.SpanKind.PRODUCER) self.assertIs(result, trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED) result = utils.get_collector_span_kind(trace_api.SpanKind.INTERNAL) self.assertIs(result, trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED) def test_proto_timestamp_from_time_ns(self): result = utils.proto_timestamp_from_time_ns(12345) self.assertIsInstance(result, Timestamp) self.assertEqual(result.nanos, 12345) # pylint: disable=too-many-locals # pylint: disable=too-many-statements def test_translate_to_collector(self): trace_id = 0x6E0C63257DE34C926F9EFCD03927272E span_id = 0x34BF92DEEFC58C92 parent_id = 0x1111111111111111 base_time = 683647322 * 10**9 # in ns start_times = ( base_time, base_time + 150 * 10**6, base_time + 300 * 10**6, ) durations = (50 * 10**6, 100 * 10**6, 200 * 10**6) end_times = ( start_times[0] + durations[0], start_times[1] + durations[1], start_times[2] + durations[2], ) span_context = trace_api.SpanContext( trace_id, span_id, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), trace_state=trace_api.TraceState([("testkey", "testvalue")]), ) parent_span_context = trace_api.SpanContext( trace_id, parent_id, is_remote=False ) other_context = trace_api.SpanContext( trace_id, span_id, is_remote=False ) event_attributes = { "annotation_bool": True, "annotation_string": "annotation_test", "key_float": 0.3, } event_timestamp = base_time + 50 * 10**6 event = trace.Event( name="event0", timestamp=event_timestamp, attributes=event_attributes, ) link_attributes = {"key_bool": True} link_1 = trace_api.Link( context=other_context, attributes=link_attributes ) link_2 = trace_api.Link( context=parent_span_context, 
attributes=link_attributes ) span_1 = trace._Span( name="test1", context=span_context, parent=parent_span_context, events=(event,), links=(link_1,), kind=trace_api.SpanKind.CLIENT, ) span_2 = trace._Span( name="test2", context=parent_span_context, parent=None, kind=trace_api.SpanKind.SERVER, ) span_3 = trace._Span( name="test3", context=other_context, links=(link_2,), parent=span_2.get_span_context(), ) otel_spans = [span_1, span_2, span_3] otel_spans[0].start(start_time=start_times[0]) otel_spans[0].set_attribute("key_bool", False) otel_spans[0].set_attribute("key_string", "hello_world") otel_spans[0].set_attribute("key_float", 111.22) otel_spans[0].set_attribute("key_int", 333) otel_spans[0].set_status(trace_api.Status(trace_api.StatusCode.OK)) otel_spans[0].end(end_time=end_times[0]) otel_spans[1].start(start_time=start_times[1]) otel_spans[1].set_status( trace_api.Status( trace_api.StatusCode.ERROR, {"test", "val"}, ) ) otel_spans[1].end(end_time=end_times[1]) otel_spans[2].start(start_time=start_times[2]) otel_spans[2].end(end_time=end_times[2]) output_spans = translate_to_collector(otel_spans) self.assertEqual(len(output_spans), 3) self.assertEqual( output_spans[0].trace_id, b"n\x0cc%}\xe3L\x92o\x9e\xfc\xd09''." ) self.assertEqual( output_spans[0].span_id, b"4\xbf\x92\xde\xef\xc5\x8c\x92" ) self.assertEqual( output_spans[0].name, trace_pb2.TruncatableString(value="test1") ) self.assertEqual( output_spans[1].name, trace_pb2.TruncatableString(value="test2") ) self.assertEqual( output_spans[2].name, trace_pb2.TruncatableString(value="test3") ) self.assertEqual( output_spans[0].start_time.seconds, int(start_times[0] / 1000000000), ) self.assertEqual( output_spans[0].end_time.seconds, int(end_times[0] / 1000000000) ) self.assertEqual(output_spans[0].kind, trace_api.SpanKind.CLIENT.value) self.assertEqual(output_spans[1].kind, trace_api.SpanKind.SERVER.value) self.assertEqual( output_spans[0].parent_span_id, b"\x11\x11\x11\x11\x11\x11\x11\x11" ) self.assertEqual( output_spans[2].parent_span_id, b"\x11\x11\x11\x11\x11\x11\x11\x11" ) self.assertEqual( output_spans[0].status.code, trace_api.StatusCode.OK.value, ) self.assertEqual(len(output_spans[0].tracestate.entries), 1) self.assertEqual(output_spans[0].tracestate.entries[0].key, "testkey") self.assertEqual( output_spans[0].tracestate.entries[0].value, "testvalue" ) self.assertEqual( output_spans[0].attributes.attribute_map["key_bool"].bool_value, False, ) self.assertEqual( output_spans[0] .attributes.attribute_map["key_string"] .string_value.value, "hello_world", ) self.assertEqual( output_spans[0].attributes.attribute_map["key_float"].double_value, 111.22, ) self.assertEqual( output_spans[0].attributes.attribute_map["key_int"].int_value, 333 ) self.assertEqual( output_spans[0].time_events.time_event[0].time.seconds, 683647322 ) self.assertEqual( output_spans[0] .time_events.time_event[0] .annotation.description.value, "event0", ) self.assertEqual( output_spans[0] .time_events.time_event[0] .annotation.attributes.attribute_map["annotation_bool"] .bool_value, True, ) self.assertEqual( output_spans[0] .time_events.time_event[0] .annotation.attributes.attribute_map["annotation_string"] .string_value.value, "annotation_test", ) self.assertEqual( output_spans[0] .time_events.time_event[0] .annotation.attributes.attribute_map["key_float"] .double_value, 0.3, ) self.assertEqual( output_spans[0].links.link[0].trace_id, b"n\x0cc%}\xe3L\x92o\x9e\xfc\xd09''.", ) self.assertEqual( output_spans[0].links.link[0].span_id, 
b"4\xbf\x92\xde\xef\xc5\x8c\x92", ) self.assertEqual( output_spans[0].links.link[0].type, trace_pb2.Span.Link.Type.TYPE_UNSPECIFIED, ) self.assertEqual( output_spans[1].status.code, trace_api.StatusCode.ERROR.value, ) self.assertEqual( output_spans[2].links.link[0].type, trace_pb2.Span.Link.Type.PARENT_LINKED_SPAN, ) self.assertEqual( output_spans[0] .links.link[0] .attributes.attribute_map["key_bool"] .bool_value, True, ) def test_export(self): mock_client = mock.MagicMock() mock_export = mock.MagicMock() mock_client.Export = mock_export host_name = "testHostName" collector_exporter = OpenCensusSpanExporter( client=mock_client, host_name=host_name ) trace_id = 0x6E0C63257DE34C926F9EFCD03927272E span_id = 0x34BF92DEEFC58C92 span_context = trace_api.SpanContext( trace_id, span_id, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), ) otel_spans = [ trace._Span( name="test1", context=span_context, kind=trace_api.SpanKind.CLIENT, ) ] result_status = collector_exporter.export(otel_spans) self.assertEqual(SpanExportResult.SUCCESS, result_status) # pylint: disable=unsubscriptable-object export_arg = mock_export.call_args[0] service_request = next(export_arg[0]) output_spans = getattr(service_request, "spans") output_node = getattr(service_request, "node") self.assertEqual(len(output_spans), 1) self.assertIsNotNone(getattr(output_node, "library_info")) self.assertIsNotNone(getattr(output_node, "service_info")) output_identifier = getattr(output_node, "identifier") self.assertEqual( getattr(output_identifier, "host_name"), "testHostName" ) def test_export_service_name(self): trace_api.set_tracer_provider( TracerProvider( resource=Resource.create({SERVICE_NAME: "testServiceName"}) ) ) mock_client = mock.MagicMock() mock_export = mock.MagicMock() mock_client.Export = mock_export host_name = "testHostName" collector_exporter = OpenCensusSpanExporter( client=mock_client, host_name=host_name ) self.assertEqual( collector_exporter.node.service_info.name, "testServiceName" ) trace_id = 0x6E0C63257DE34C926F9EFCD03927272E span_id = 0x34BF92DEEFC58C92 span_context = trace_api.SpanContext( trace_id, span_id, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), ) resource = Resource.create({SERVICE_NAME: "test"}) otel_spans = [ trace._Span( name="test1", context=span_context, kind=trace_api.SpanKind.CLIENT, resource=resource, ) ] result_status = collector_exporter.export(otel_spans) self.assertEqual(SpanExportResult.SUCCESS, result_status) self.assertEqual(collector_exporter.node.service_info.name, "test") python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/000077500000000000000000000000001511654350100300365ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/LICENSE000066400000000000000000000261351511654350100310520ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/README.rst000066400000000000000000000012751511654350100315320ustar00rootroot00000000000000OpenTelemetry Protobuf Encoding =============================== |pypi| .. |pypi| image:: https://badge.fury.io/py/opentelemetry-exporter-otlp-proto-common.svg :target: https://pypi.org/project/opentelemetry-exporter-otlp-proto-common/ This library is provided as a convenience to encode to Protobuf. 
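A minimal usage sketch (illustrative only; ``finished_spans`` is an assumed
name for a sequence of SDK ``ReadableSpan`` objects, e.g. collected with an
in-memory span exporter)::

    from opentelemetry.exporter.otlp.proto.common.trace_encoder import (
        encode_spans,
    )

    request = encode_spans(finished_spans)  # ExportTraceServiceRequest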
Currently used by: * opentelemetry-exporter-otlp-proto-grpc * opentelemetry-exporter-otlp-proto-http Installation ------------ :: pip install opentelemetry-exporter-otlp-proto-common References ---------- * `OpenTelemetry <https://opentelemetry.io/>`_ * `OpenTelemetry Protocol Specification `_ python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/pyproject.toml000066400000000000000000000025231511654350100327540ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-exporter-otlp-proto-common" dynamic = ["version"] description = "OpenTelemetry Protobuf encoding" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Framework :: OpenTelemetry :: Exporters", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ] dependencies = [ "opentelemetry-proto == 1.39.1", ] [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-otlp-proto-common" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/exporter/otlp/proto/common/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/000077500000000000000000000000001511654350100306255ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/000077500000000000000000000000001511654350100335215ustar00rootroot00000000000000
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.exporter.otlp.proto.common.version import __version__ __all__ = ["__version__"] _internal/000077500000000000000000000000001511654350100426765ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common__init__.py000066400000000000000000000126371511654350100450200ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import logging from collections.abc import Sequence from typing import ( Any, Callable, Dict, List, Mapping, Optional, TypeVar, ) from opentelemetry.proto.common.v1.common_pb2 import AnyValue as PB2AnyValue from opentelemetry.proto.common.v1.common_pb2 import ( ArrayValue as PB2ArrayValue, ) from opentelemetry.proto.common.v1.common_pb2 import ( InstrumentationScope as PB2InstrumentationScope, ) from opentelemetry.proto.common.v1.common_pb2 import KeyValue as PB2KeyValue from opentelemetry.proto.common.v1.common_pb2 import ( KeyValueList as PB2KeyValueList, ) from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as PB2Resource, ) from opentelemetry.sdk.trace import Resource from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.util.types import _ExtendedAttributes _logger = logging.getLogger(__name__) _TypingResourceT = TypeVar("_TypingResourceT") _ResourceDataT = TypeVar("_ResourceDataT") def _encode_instrumentation_scope( instrumentation_scope: InstrumentationScope, ) -> PB2InstrumentationScope: if instrumentation_scope is None: return PB2InstrumentationScope() return PB2InstrumentationScope( name=instrumentation_scope.name, version=instrumentation_scope.version, attributes=_encode_attributes(instrumentation_scope.attributes), ) def _encode_resource(resource: Resource) -> PB2Resource: return PB2Resource(attributes=_encode_attributes(resource.attributes)) def _encode_value( value: Any, allow_null: bool = False ) -> Optional[PB2AnyValue]: if allow_null is True and value is None: return None if isinstance(value, bool): return PB2AnyValue(bool_value=value) if isinstance(value, str): return PB2AnyValue(string_value=value) if isinstance(value, int): return PB2AnyValue(int_value=value) if isinstance(value, float): return PB2AnyValue(double_value=value) if isinstance(value, bytes): return PB2AnyValue(bytes_value=value) if isinstance(value, Sequence): return PB2AnyValue( array_value=PB2ArrayValue( 
values=_encode_array(value, allow_null=allow_null) ) ) elif isinstance(value, Mapping): return PB2AnyValue( kvlist_value=PB2KeyValueList( values=[ _encode_key_value(str(k), v, allow_null=allow_null) for k, v in value.items() ] ) ) raise Exception(f"Invalid type {type(value)} of value {value}") def _encode_key_value( key: str, value: Any, allow_null: bool = False ) -> PB2KeyValue: return PB2KeyValue( key=key, value=_encode_value(value, allow_null=allow_null) ) def _encode_array( array: Sequence[Any], allow_null: bool = False ) -> Sequence[PB2AnyValue]: if not allow_null: # Let the exception get raised by _encode_value() return [_encode_value(v, allow_null=allow_null) for v in array] return [ _encode_value(v, allow_null=allow_null) if v is not None # Use an empty AnyValue to represent None in an array. Behavior may change pending # https://github.com/open-telemetry/opentelemetry-specification/issues/4392 else PB2AnyValue() for v in array ] def _encode_span_id(span_id: int) -> bytes: return span_id.to_bytes(length=8, byteorder="big", signed=False) def _encode_trace_id(trace_id: int) -> bytes: return trace_id.to_bytes(length=16, byteorder="big", signed=False) def _encode_attributes( attributes: _ExtendedAttributes, allow_null: bool = False, ) -> Optional[List[PB2KeyValue]]: if attributes: pb2_attributes = [] for key, value in attributes.items(): # pylint: disable=broad-exception-caught try: pb2_attributes.append( _encode_key_value(key, value, allow_null=allow_null) ) except Exception as error: _logger.exception("Failed to encode key %s: %s", key, error) else: pb2_attributes = None return pb2_attributes def _get_resource_data( sdk_resource_scope_data: Dict[Resource, _ResourceDataT], resource_class: Callable[..., _TypingResourceT], name: str, ) -> List[_TypingResourceT]: resource_data = [] for ( sdk_resource, scope_data, ) in sdk_resource_scope_data.items(): collector_resource = PB2Resource( attributes=_encode_attributes(sdk_resource.attributes) ) resource_data.append( resource_class( **{ "resource": collector_resource, "scope_{}".format(name): scope_data.values(), } ) ) return resource_data _log_encoder/000077500000000000000000000000001511654350100453155ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal__init__.py000066400000000000000000000073561511654350100474410ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/_log_encoder# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
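"""Encodes SDK log records into OTLP ``ExportLogsServiceRequest`` messages.

Usage sketch (illustrative; ``records`` is an assumed name for a sequence of
``ReadableLogRecord`` objects produced by the SDK)::

    from opentelemetry.exporter.otlp.proto.common._log_encoder import (
        encode_logs,
    )

    request = encode_logs(records)
    payload = request.SerializeToString()  # wire-ready bytes
"""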
from collections import defaultdict from typing import List, Sequence from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_attributes, _encode_instrumentation_scope, _encode_resource, _encode_span_id, _encode_trace_id, _encode_value, ) from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import ( ExportLogsServiceRequest, ) from opentelemetry.proto.logs.v1.logs_pb2 import LogRecord as PB2LogRecord from opentelemetry.proto.logs.v1.logs_pb2 import ( ResourceLogs, ScopeLogs, ) from opentelemetry.sdk._logs import ReadableLogRecord def encode_logs( batch: Sequence[ReadableLogRecord], ) -> ExportLogsServiceRequest: return ExportLogsServiceRequest(resource_logs=_encode_resource_logs(batch)) def _encode_log(readable_log_record: ReadableLogRecord) -> PB2LogRecord: span_id = ( None if readable_log_record.log_record.span_id == 0 else _encode_span_id(readable_log_record.log_record.span_id) ) trace_id = ( None if readable_log_record.log_record.trace_id == 0 else _encode_trace_id(readable_log_record.log_record.trace_id) ) body = readable_log_record.log_record.body return PB2LogRecord( time_unix_nano=readable_log_record.log_record.timestamp, observed_time_unix_nano=readable_log_record.log_record.observed_timestamp, span_id=span_id, trace_id=trace_id, flags=int(readable_log_record.log_record.trace_flags), body=_encode_value(body, allow_null=True), severity_text=readable_log_record.log_record.severity_text, attributes=_encode_attributes( readable_log_record.log_record.attributes, allow_null=True ), dropped_attributes_count=readable_log_record.dropped_attributes, severity_number=getattr( readable_log_record.log_record.severity_number, "value", None ), event_name=readable_log_record.log_record.event_name, ) def _encode_resource_logs( batch: Sequence[ReadableLogRecord], ) -> List[ResourceLogs]: sdk_resource_logs = defaultdict(lambda: defaultdict(list)) for readable_log in batch: sdk_resource = readable_log.resource sdk_instrumentation = readable_log.instrumentation_scope or None pb2_log = _encode_log(readable_log) sdk_resource_logs[sdk_resource][sdk_instrumentation].append(pb2_log) pb2_resource_logs = [] for sdk_resource, sdk_instrumentations in sdk_resource_logs.items(): scope_logs = [] for sdk_instrumentation, pb2_logs in sdk_instrumentations.items(): scope_logs.append( ScopeLogs( scope=(_encode_instrumentation_scope(sdk_instrumentation)), log_records=pb2_logs, schema_url=sdk_instrumentation.schema_url if sdk_instrumentation else None, ) ) pb2_resource_logs.append( ResourceLogs( resource=_encode_resource(sdk_resource), scope_logs=scope_logs, schema_url=sdk_resource.schema_url, ) ) return pb2_resource_logs metrics_encoder/000077500000000000000000000000001511654350100460435ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal__init__.py000066400000000000000000000344571511654350100501710ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import logging from os import environ from typing import Dict, List from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_attributes, _encode_instrumentation_scope, _encode_span_id, _encode_trace_id, ) from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ( ExportMetricsServiceRequest, ) from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2 from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as PB2Resource, ) from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION, OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE, ) from opentelemetry.sdk.metrics import ( Counter, Exemplar, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, ) from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Gauge, MetricExporter, MetricsData, Sum, ) from opentelemetry.sdk.metrics.export import ( ExponentialHistogram as ExponentialHistogramType, ) from opentelemetry.sdk.metrics.export import ( Histogram as HistogramType, ) from opentelemetry.sdk.metrics.view import ( Aggregation, ExplicitBucketHistogramAggregation, ExponentialBucketHistogramAggregation, ) _logger = logging.getLogger(__name__) class OTLPMetricExporterMixin: def _common_configuration( self, preferred_temporality: dict[type, AggregationTemporality] | None = None, preferred_aggregation: dict[type, Aggregation] | None = None, ) -> None: MetricExporter.__init__( self, preferred_temporality=self._get_temporality(preferred_temporality), preferred_aggregation=self._get_aggregation(preferred_aggregation), ) def _get_temporality( self, preferred_temporality: Dict[type, AggregationTemporality] ) -> Dict[type, AggregationTemporality]: otel_exporter_otlp_metrics_temporality_preference = ( environ.get( OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE, "CUMULATIVE", ) .upper() .strip() ) if otel_exporter_otlp_metrics_temporality_preference == "DELTA": instrument_class_temporality = { Counter: AggregationTemporality.DELTA, UpDownCounter: AggregationTemporality.CUMULATIVE, Histogram: AggregationTemporality.DELTA, ObservableCounter: AggregationTemporality.DELTA, ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, ObservableGauge: AggregationTemporality.CUMULATIVE, } elif otel_exporter_otlp_metrics_temporality_preference == "LOWMEMORY": instrument_class_temporality = { Counter: AggregationTemporality.DELTA, UpDownCounter: AggregationTemporality.CUMULATIVE, Histogram: AggregationTemporality.DELTA, ObservableCounter: AggregationTemporality.CUMULATIVE, ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, ObservableGauge: AggregationTemporality.CUMULATIVE, } else: if otel_exporter_otlp_metrics_temporality_preference != ( "CUMULATIVE" ): _logger.warning( "Unrecognized OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE" " value found: " "%s, " "using CUMULATIVE", otel_exporter_otlp_metrics_temporality_preference, ) instrument_class_temporality = { Counter: AggregationTemporality.CUMULATIVE, UpDownCounter: AggregationTemporality.CUMULATIVE, Histogram: 
AggregationTemporality.CUMULATIVE, ObservableCounter: AggregationTemporality.CUMULATIVE, ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, ObservableGauge: AggregationTemporality.CUMULATIVE, } instrument_class_temporality.update(preferred_temporality or {}) return instrument_class_temporality def _get_aggregation( self, preferred_aggregation: Dict[type, Aggregation], ) -> Dict[type, Aggregation]: otel_exporter_otlp_metrics_default_histogram_aggregation = environ.get( OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION, "explicit_bucket_histogram", ) if otel_exporter_otlp_metrics_default_histogram_aggregation == ( "base2_exponential_bucket_histogram" ): instrument_class_aggregation = { Histogram: ExponentialBucketHistogramAggregation(), } else: if otel_exporter_otlp_metrics_default_histogram_aggregation != ( "explicit_bucket_histogram" ): _logger.warning( ( "Invalid value for %s: %s, using explicit bucket " "histogram aggregation" ), OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION, otel_exporter_otlp_metrics_default_histogram_aggregation, ) instrument_class_aggregation = { Histogram: ExplicitBucketHistogramAggregation(), } instrument_class_aggregation.update(preferred_aggregation or {}) return instrument_class_aggregation class EncodingException(Exception): """ Raised by encode_metrics() when an exception is caught during encoding. Contains the problematic metric so the misbehaving metric name and details can be logged during exception handling. """ def __init__(self, original_exception, metric): super().__init__() self.original_exception = original_exception self.metric = metric def __str__(self): return f"{self.metric}\n{self.original_exception}" def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest: resource_metrics_dict = {} for resource_metrics in data.resource_metrics: _encode_resource_metrics(resource_metrics, resource_metrics_dict) resource_data = [] for ( sdk_resource, scope_data, ) in resource_metrics_dict.items(): resource_data.append( pb2.ResourceMetrics( resource=PB2Resource( attributes=_encode_attributes(sdk_resource.attributes) ), scope_metrics=scope_data.values(), schema_url=sdk_resource.schema_url, ) ) return ExportMetricsServiceRequest(resource_metrics=resource_data) def _encode_resource_metrics(resource_metrics, resource_metrics_dict): resource = resource_metrics.resource # It is safe to assume that each entry in data.resource_metrics is # associated with an unique resource. scope_metrics_dict = {} resource_metrics_dict[resource] = scope_metrics_dict for scope_metrics in resource_metrics.scope_metrics: instrumentation_scope = scope_metrics.scope # The SDK groups metrics in instrumentation scopes already so # there is no need to check for existing instrumentation scopes # here. 
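        # For example, counters created from one shared meter report under a
        # single InstrumentationScope, so they arrive here inside one
        # scope_metrics entry rather than as duplicate scopes.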
pb2_scope_metrics = pb2.ScopeMetrics( scope=_encode_instrumentation_scope(instrumentation_scope), schema_url=instrumentation_scope.schema_url, ) scope_metrics_dict[instrumentation_scope] = pb2_scope_metrics for metric in scope_metrics.metrics: pb2_metric = pb2.Metric( name=metric.name, description=metric.description, unit=metric.unit, ) try: _encode_metric(metric, pb2_metric) except Exception as ex: # `from None` so we don't get "During handling of the above exception, another exception occurred:" raise EncodingException(ex, metric) from None pb2_scope_metrics.metrics.append(pb2_metric) def _encode_metric(metric, pb2_metric): if isinstance(metric.data, Gauge): for data_point in metric.data.data_points: pt = pb2.NumberDataPoint( attributes=_encode_attributes(data_point.attributes), time_unix_nano=data_point.time_unix_nano, exemplars=_encode_exemplars(data_point.exemplars), ) if isinstance(data_point.value, int): pt.as_int = data_point.value else: pt.as_double = data_point.value pb2_metric.gauge.data_points.append(pt) elif isinstance(metric.data, HistogramType): for data_point in metric.data.data_points: pt = pb2.HistogramDataPoint( attributes=_encode_attributes(data_point.attributes), time_unix_nano=data_point.time_unix_nano, start_time_unix_nano=data_point.start_time_unix_nano, exemplars=_encode_exemplars(data_point.exemplars), count=data_point.count, sum=data_point.sum, bucket_counts=data_point.bucket_counts, explicit_bounds=data_point.explicit_bounds, max=data_point.max, min=data_point.min, ) pb2_metric.histogram.aggregation_temporality = ( metric.data.aggregation_temporality ) pb2_metric.histogram.data_points.append(pt) elif isinstance(metric.data, Sum): for data_point in metric.data.data_points: pt = pb2.NumberDataPoint( attributes=_encode_attributes(data_point.attributes), start_time_unix_nano=data_point.start_time_unix_nano, time_unix_nano=data_point.time_unix_nano, exemplars=_encode_exemplars(data_point.exemplars), ) if isinstance(data_point.value, int): pt.as_int = data_point.value else: pt.as_double = data_point.value # note that because sum is a message type, the # fields must be set individually rather than # instantiating a pb2.Sum and setting it once pb2_metric.sum.aggregation_temporality = ( metric.data.aggregation_temporality ) pb2_metric.sum.is_monotonic = metric.data.is_monotonic pb2_metric.sum.data_points.append(pt) elif isinstance(metric.data, ExponentialHistogramType): for data_point in metric.data.data_points: if data_point.positive.bucket_counts: positive = pb2.ExponentialHistogramDataPoint.Buckets( offset=data_point.positive.offset, bucket_counts=data_point.positive.bucket_counts, ) else: positive = None if data_point.negative.bucket_counts: negative = pb2.ExponentialHistogramDataPoint.Buckets( offset=data_point.negative.offset, bucket_counts=data_point.negative.bucket_counts, ) else: negative = None pt = pb2.ExponentialHistogramDataPoint( attributes=_encode_attributes(data_point.attributes), time_unix_nano=data_point.time_unix_nano, start_time_unix_nano=data_point.start_time_unix_nano, exemplars=_encode_exemplars(data_point.exemplars), count=data_point.count, sum=data_point.sum, scale=data_point.scale, zero_count=data_point.zero_count, positive=positive, negative=negative, flags=data_point.flags, max=data_point.max, min=data_point.min, ) pb2_metric.exponential_histogram.aggregation_temporality = ( metric.data.aggregation_temporality ) pb2_metric.exponential_histogram.data_points.append(pt) else: _logger.warning( "unsupported data type %s", 
metric.data.__class__.__name__, ) def _encode_exemplars(sdk_exemplars: List[Exemplar]) -> List[pb2.Exemplar]: """ Converts a list of SDK Exemplars into a list of protobuf Exemplars. Args: sdk_exemplars (list): The list of exemplars from the OpenTelemetry SDK. Returns: list: A list of protobuf exemplars. """ pb_exemplars = [] for sdk_exemplar in sdk_exemplars: if ( sdk_exemplar.span_id is not None and sdk_exemplar.trace_id is not None ): pb_exemplar = pb2.Exemplar( time_unix_nano=sdk_exemplar.time_unix_nano, span_id=_encode_span_id(sdk_exemplar.span_id), trace_id=_encode_trace_id(sdk_exemplar.trace_id), filtered_attributes=_encode_attributes( sdk_exemplar.filtered_attributes ), ) else: pb_exemplar = pb2.Exemplar( time_unix_nano=sdk_exemplar.time_unix_nano, filtered_attributes=_encode_attributes( sdk_exemplar.filtered_attributes ), ) # Assign the value based on its type in the SDK exemplar if isinstance(sdk_exemplar.value, float): pb_exemplar.as_double = sdk_exemplar.value elif isinstance(sdk_exemplar.value, int): pb_exemplar.as_int = sdk_exemplar.value else: raise ValueError("Exemplar value must be an int or float") pb_exemplars.append(pb_exemplar) return pb_exemplars trace_encoder/000077500000000000000000000000001511654350100454735ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal__init__.py000066400000000000000000000151431511654350100476100ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
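"""Encodes SDK spans into OTLP ``ExportTraceServiceRequest`` messages,
grouping them by resource and instrumentation scope.

Usage sketch (illustrative; ``finished_spans`` is an assumed name for a
sequence of SDK ``ReadableSpan`` objects)::

    request = encode_spans(finished_spans)
"""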
import logging from collections import defaultdict from typing import List, Optional, Sequence from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_attributes, _encode_instrumentation_scope, _encode_resource, _encode_span_id, _encode_trace_id, ) from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ( ExportTraceServiceRequest as PB2ExportTraceServiceRequest, ) from opentelemetry.proto.trace.v1.trace_pb2 import ( ResourceSpans as PB2ResourceSpans, ) from opentelemetry.proto.trace.v1.trace_pb2 import ScopeSpans as PB2ScopeSpans from opentelemetry.proto.trace.v1.trace_pb2 import Span as PB2SPan from opentelemetry.proto.trace.v1.trace_pb2 import SpanFlags as PB2SpanFlags from opentelemetry.proto.trace.v1.trace_pb2 import Status as PB2Status from opentelemetry.sdk.trace import Event, ReadableSpan from opentelemetry.trace import Link, SpanKind from opentelemetry.trace.span import SpanContext, Status, TraceState # pylint: disable=E1101 _SPAN_KIND_MAP = { SpanKind.INTERNAL: PB2SPan.SpanKind.SPAN_KIND_INTERNAL, SpanKind.SERVER: PB2SPan.SpanKind.SPAN_KIND_SERVER, SpanKind.CLIENT: PB2SPan.SpanKind.SPAN_KIND_CLIENT, SpanKind.PRODUCER: PB2SPan.SpanKind.SPAN_KIND_PRODUCER, SpanKind.CONSUMER: PB2SPan.SpanKind.SPAN_KIND_CONSUMER, } _logger = logging.getLogger(__name__) def encode_spans( sdk_spans: Sequence[ReadableSpan], ) -> PB2ExportTraceServiceRequest: return PB2ExportTraceServiceRequest( resource_spans=_encode_resource_spans(sdk_spans) ) def _encode_resource_spans( sdk_spans: Sequence[ReadableSpan], ) -> List[PB2ResourceSpans]: # We need to inspect the spans and group + structure them as: # # Resource # Instrumentation Library # Spans # # First loop organizes the SDK spans in this structure. Protobuf messages # are not hashable so we stick with SDK data in this phase. # # Second loop encodes the data into Protobuf format. 
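    # For example, two spans started from the same tracer share one
    # InstrumentationScope, so they are emitted together in a single
    # ScopeSpans message under their common ResourceSpans entry.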
# sdk_resource_spans = defaultdict(lambda: defaultdict(list)) for sdk_span in sdk_spans: sdk_resource = sdk_span.resource sdk_instrumentation = sdk_span.instrumentation_scope or None pb2_span = _encode_span(sdk_span) sdk_resource_spans[sdk_resource][sdk_instrumentation].append(pb2_span) pb2_resource_spans = [] for sdk_resource, sdk_instrumentations in sdk_resource_spans.items(): scope_spans = [] for sdk_instrumentation, pb2_spans in sdk_instrumentations.items(): scope_spans.append( PB2ScopeSpans( scope=(_encode_instrumentation_scope(sdk_instrumentation)), spans=pb2_spans, schema_url=sdk_instrumentation.schema_url if sdk_instrumentation else None, ) ) pb2_resource_spans.append( PB2ResourceSpans( resource=_encode_resource(sdk_resource), scope_spans=scope_spans, schema_url=sdk_resource.schema_url, ) ) return pb2_resource_spans def _span_flags(parent_span_context: Optional[SpanContext]) -> int: flags = PB2SpanFlags.SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK if parent_span_context and parent_span_context.is_remote: flags |= PB2SpanFlags.SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK return flags def _encode_span(sdk_span: ReadableSpan) -> PB2SPan: span_context = sdk_span.get_span_context() return PB2SPan( trace_id=_encode_trace_id(span_context.trace_id), span_id=_encode_span_id(span_context.span_id), trace_state=_encode_trace_state(span_context.trace_state), parent_span_id=_encode_parent_id(sdk_span.parent), name=sdk_span.name, kind=_SPAN_KIND_MAP[sdk_span.kind], start_time_unix_nano=sdk_span.start_time, end_time_unix_nano=sdk_span.end_time, attributes=_encode_attributes(sdk_span.attributes), events=_encode_events(sdk_span.events), links=_encode_links(sdk_span.links), status=_encode_status(sdk_span.status), dropped_attributes_count=sdk_span.dropped_attributes, dropped_events_count=sdk_span.dropped_events, dropped_links_count=sdk_span.dropped_links, flags=_span_flags(sdk_span.parent), ) def _encode_events( events: Sequence[Event], ) -> Optional[List[PB2SPan.Event]]: pb2_events = None if events: pb2_events = [] for event in events: encoded_event = PB2SPan.Event( name=event.name, time_unix_nano=event.timestamp, attributes=_encode_attributes(event.attributes), dropped_attributes_count=event.dropped_attributes, ) pb2_events.append(encoded_event) return pb2_events def _encode_links(links: Sequence[Link]) -> Sequence[PB2SPan.Link]: pb2_links = None if links: pb2_links = [] for link in links: encoded_link = PB2SPan.Link( trace_id=_encode_trace_id(link.context.trace_id), span_id=_encode_span_id(link.context.span_id), attributes=_encode_attributes(link.attributes), dropped_attributes_count=link.dropped_attributes, flags=_span_flags(link.context), ) pb2_links.append(encoded_link) return pb2_links def _encode_status(status: Status) -> Optional[PB2Status]: pb2_status = None if status is not None: pb2_status = PB2Status( code=status.status_code.value, message=status.description, ) return pb2_status def _encode_trace_state(trace_state: TraceState) -> Optional[str]: pb2_trace_state = None if trace_state is not None: pb2_trace_state = ",".join( [f"{key}={value}" for key, value in (trace_state.items())] ) return pb2_trace_state def _encode_parent_id(context: Optional[SpanContext]) -> Optional[bytes]: if context: return _encode_span_id(context.span_id) return None _log_encoder.py000066400000000000000000000013061511654350100437140ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common# Copyright The OpenTelemetry Authors # # Licensed 
under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.exporter.otlp.proto.common._internal._log_encoder import ( encode_logs, ) __all__ = ["encode_logs"] metrics_encoder.py000066400000000000000000000013171511654350100444440ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.exporter.otlp.proto.common._internal.metrics_encoder import ( encode_metrics, ) __all__ = ["encode_metrics"] py.typed000066400000000000000000000000001511654350100424100ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/commontrace_encoder.py000066400000000000000000000013111511654350100440660ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import ( encode_spans, ) __all__ = ["encode_spans"] version/000077500000000000000000000000001511654350100424105ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common__init__.py000066400000000000000000000011401511654350100445150ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "1.39.1" python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements.txt000066400000000000000000000006121511654350100342760ustar00rootroot00000000000000asgiref==3.7.2 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 protobuf==5.26.1 py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e opentelemetry-sdk -e opentelemetry-semantic-conventions -e tests/opentelemetry-test-utils -e opentelemetry-proto -e exporter/opentelemetry-exporter-otlp-proto-common python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/tests/000077500000000000000000000000001511654350100312005ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/tests/__init__.py000066400000000000000000000000001511654350100332770ustar00rootroot00000000000000test_attribute_encoder.py000066400000000000000000000111521511654350100362340ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
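"""Unit tests for ``_encode_attributes``: encoding of every supported
attribute value type into protobuf ``KeyValue`` messages, plus error logging
for values that cannot be encoded (``None`` alone or inside a list)."""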
import unittest from logging import ERROR from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_attributes, ) from opentelemetry.proto.common.v1.common_pb2 import AnyValue as PB2AnyValue from opentelemetry.proto.common.v1.common_pb2 import ( ArrayValue as PB2ArrayValue, ) from opentelemetry.proto.common.v1.common_pb2 import KeyValue as PB2KeyValue class TestOTLPAttributeEncoder(unittest.TestCase): def test_encode_attributes_all_kinds(self): result = _encode_attributes( { "a": 1, # int "b": 3.14, # float "c": False, # bool "hello": "world", # str "greet": ["hola", "bonjour"], # Sequence[str] "data": [1, 2], # Sequence[int] "data_granular": [1.4, 2.4], # Sequence[float] "binary_data": b"x00\x01\x02", # bytes } ) self.assertEqual( result, [ PB2KeyValue(key="a", value=PB2AnyValue(int_value=1)), PB2KeyValue(key="b", value=PB2AnyValue(double_value=3.14)), PB2KeyValue(key="c", value=PB2AnyValue(bool_value=False)), PB2KeyValue( key="hello", value=PB2AnyValue(string_value="world") ), PB2KeyValue( key="greet", value=PB2AnyValue( array_value=PB2ArrayValue( values=[ PB2AnyValue(string_value="hola"), PB2AnyValue(string_value="bonjour"), ] ) ), ), PB2KeyValue( key="data", value=PB2AnyValue( array_value=PB2ArrayValue( values=[ PB2AnyValue(int_value=1), PB2AnyValue(int_value=2), ] ) ), ), PB2KeyValue( key="data_granular", value=PB2AnyValue( array_value=PB2ArrayValue( values=[ PB2AnyValue(double_value=1.4), PB2AnyValue(double_value=2.4), ] ) ), ), PB2KeyValue( key="binary_data", value=PB2AnyValue(bytes_value=b"x00\x01\x02"), ), ], ) def test_encode_attributes_error_list_none(self): with self.assertLogs(level=ERROR) as error: result = _encode_attributes( {"a": 1, "bad_key": ["test", None, "test"], "b": 2} ) self.assertEqual(len(error.records), 1) self.assertEqual(error.records[0].msg, "Failed to encode key %s: %s") self.assertEqual(error.records[0].args[0], "bad_key") self.assertIsInstance(error.records[0].args[1], Exception) self.assertEqual( result, [ PB2KeyValue(key="a", value=PB2AnyValue(int_value=1)), PB2KeyValue(key="b", value=PB2AnyValue(int_value=2)), ], ) def test_encode_attributes_error_logs_key(self): with self.assertLogs(level=ERROR) as error: result = _encode_attributes({"a": 1, "bad_key": None, "b": 2}) self.assertEqual(len(error.records), 1) self.assertEqual(error.records[0].msg, "Failed to encode key %s: %s") self.assertEqual(error.records[0].args[0], "bad_key") self.assertIsInstance(error.records[0].args[1], Exception) self.assertEqual( result, [ PB2KeyValue(key="a", value=PB2AnyValue(int_value=1)), PB2KeyValue(key="b", value=PB2AnyValue(int_value=2)), ], ) test_log_encoder.py000066400000000000000000000725041511654350100350220ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
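"""Unit tests for the OTLP log encoder: full batch encoding into
``ExportLogsServiceRequest``, handling of records without a body, and
propagation of the dropped-attributes count."""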
import unittest from typing import List, Tuple from opentelemetry._logs import LogRecord, SeverityNumber from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_attributes, _encode_span_id, _encode_trace_id, _encode_value, ) from opentelemetry.exporter.otlp.proto.common._log_encoder import encode_logs from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import ( ExportLogsServiceRequest, ) from opentelemetry.proto.common.v1.common_pb2 import AnyValue as PB2AnyValue from opentelemetry.proto.common.v1.common_pb2 import ( ArrayValue as PB2ArrayValue, ) from opentelemetry.proto.common.v1.common_pb2 import ( InstrumentationScope as PB2InstrumentationScope, ) from opentelemetry.proto.common.v1.common_pb2 import KeyValue as PB2KeyValue from opentelemetry.proto.common.v1.common_pb2 import ( KeyValueList as PB2KeyValueList, ) from opentelemetry.proto.logs.v1.logs_pb2 import LogRecord as PB2LogRecord from opentelemetry.proto.logs.v1.logs_pb2 import ( ResourceLogs as PB2ResourceLogs, ) from opentelemetry.proto.logs.v1.logs_pb2 import ScopeLogs as PB2ScopeLogs from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as PB2Resource, ) from opentelemetry.sdk._logs import LogRecordLimits, ReadWriteLogRecord from opentelemetry.sdk.resources import Resource as SDKResource from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.trace import ( NonRecordingSpan, SpanContext, TraceFlags, set_span_in_context, ) class TestOTLPLogEncoder(unittest.TestCase): def test_encode(self): sdk_logs, expected_encoding = self.get_test_logs() self.assertEqual(encode_logs(sdk_logs), expected_encoding) def test_encode_no_body(self): sdk_logs, expected_encoding = self.get_test_logs() for log in sdk_logs: log.log_record.body = None for resource_log in expected_encoding.resource_logs: for scope_log in resource_log.scope_logs: for log_record in scope_log.log_records: log_record.ClearField("body") self.assertEqual(encode_logs(sdk_logs), expected_encoding) def test_dropped_attributes_count(self): sdk_logs = self._get_test_logs_dropped_attributes() encoded_logs = encode_logs(sdk_logs) self.assertTrue(hasattr(sdk_logs[0], "dropped_attributes")) self.assertEqual( # pylint:disable=no-member encoded_logs.resource_logs[0] .scope_logs[0] .log_records[0] .dropped_attributes_count, 2, ) @staticmethod def _get_sdk_log_data() -> List[ReadWriteLogRecord]: # pylint:disable=too-many-locals ctx_log1 = set_span_in_context( NonRecordingSpan( SpanContext( 89564621134313219400156819398935297684, 1312458408527513268, False, TraceFlags(0x01), ) ) ) log1 = ReadWriteLogRecord( LogRecord( timestamp=1644650195189786880, observed_timestamp=1644650195189786881, context=ctx_log1, severity_text="WARN", severity_number=SeverityNumber.WARN, body="Do not go gentle into that good night. 
Rage, rage against the dying of the light", attributes={"a": 1, "b": "c"}, ), resource=SDKResource( {"first_resource": "value"}, "resource_schema_url", ), instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), ) log2 = ReadWriteLogRecord( LogRecord( timestamp=1644650249738562048, observed_timestamp=1644650249738562049, severity_text="WARN", severity_number=SeverityNumber.WARN, body="Cooper, this is no time for caution!", attributes={}, ), resource=SDKResource({"second_resource": "CASE"}), instrumentation_scope=InstrumentationScope( "second_name", "second_version" ), ) ctx_log3 = set_span_in_context( NonRecordingSpan( SpanContext( 271615924622795969659406376515024083555, 4242561578944770265, False, TraceFlags(0x01), ) ) ) log3 = ReadWriteLogRecord( LogRecord( timestamp=1644650427658989056, observed_timestamp=1644650427658989057, context=ctx_log3, severity_text="DEBUG", severity_number=SeverityNumber.DEBUG, body="To our galaxy", attributes={"a": 1, "b": "c"}, ), resource=SDKResource({"second_resource": "CASE"}), instrumentation_scope=None, ) ctx_log4 = set_span_in_context( NonRecordingSpan( SpanContext( 212592107417388365804938480559624925555, 6077757853989569223, False, TraceFlags(0x01), ) ) ) log4 = ReadWriteLogRecord( LogRecord( timestamp=1644650584292683008, observed_timestamp=1644650584292683009, context=ctx_log4, severity_text="INFO", severity_number=SeverityNumber.INFO, body="Love is the one thing that transcends time and space", attributes={"filename": "model.py", "func_name": "run_method"}, ), resource=SDKResource( {"first_resource": "value"}, "resource_schema_url", ), instrumentation_scope=InstrumentationScope( "another_name", "another_version" ), ) ctx_log5 = set_span_in_context( NonRecordingSpan( SpanContext( 212592107417388365804938480559624925555, 6077757853989569445, False, TraceFlags(0x01), ) ) ) log5 = ReadWriteLogRecord( LogRecord( timestamp=1644650584292683009, observed_timestamp=1644650584292683010, context=ctx_log5, severity_text="INFO", severity_number=SeverityNumber.INFO, body={"error": None, "array_with_nones": [1, None, 2]}, attributes={}, ), resource=SDKResource({}), instrumentation_scope=InstrumentationScope( "last_name", "last_version" ), ) ctx_log6 = set_span_in_context( NonRecordingSpan( SpanContext( 212592107417388365804938480559624925522, 6077757853989569222, False, TraceFlags(0x01), ) ) ) log6 = ReadWriteLogRecord( LogRecord( timestamp=1644650584292683022, observed_timestamp=1644650584292683022, context=ctx_log6, severity_text="ERROR", severity_number=SeverityNumber.ERROR, body="This instrumentation scope has a schema url", attributes={"filename": "model.py", "func_name": "run_method"}, ), resource=SDKResource( {"first_resource": "value"}, "resource_schema_url", ), instrumentation_scope=InstrumentationScope( "scope_with_url", "scope_with_url_version", "instrumentation_schema_url", ), ) ctx_log7 = set_span_in_context( NonRecordingSpan( SpanContext( 212592107417388365804938480559624925533, 6077757853989569233, False, TraceFlags(0x01), ) ) ) log7 = ReadWriteLogRecord( LogRecord( timestamp=1644650584292683033, observed_timestamp=1644650584292683033, context=ctx_log7, severity_text="FATAL", severity_number=SeverityNumber.FATAL, body="This instrumentation scope has a schema url and attributes", attributes={"filename": "model.py", "func_name": "run_method"}, ), resource=SDKResource( {"first_resource": "value"}, "resource_schema_url", ), instrumentation_scope=InstrumentationScope( "scope_with_attributes", "scope_with_attributes_version", 
"instrumentation_schema_url", {"one": 1, "two": "2"}, ), ) ctx_log8 = set_span_in_context( NonRecordingSpan( SpanContext( 212592107417388365804938480559624925566, 6077757853989569466, False, TraceFlags(0x01), ) ) ) log8 = ReadWriteLogRecord( LogRecord( timestamp=1644650584292683044, observed_timestamp=1644650584292683044, context=ctx_log8, severity_text="INFO", severity_number=SeverityNumber.INFO, body="Test export of extended attributes", attributes={ "extended": { "sequence": [{"inner": "mapping", "none": None}] } }, ), resource=SDKResource({}), instrumentation_scope=InstrumentationScope( "extended_name", "extended_version" ), ) ctx_log9 = set_span_in_context( NonRecordingSpan( SpanContext( 212592107417388365804938480559624925566, 6077757853989569466, False, TraceFlags(0x01), ) ) ) log9 = ReadWriteLogRecord( LogRecord( # these are otherwise set by default observed_timestamp=1644650584292683045, context=ctx_log9, ), resource=SDKResource({}), instrumentation_scope=InstrumentationScope( "empty_log_record_name", "empty_log_record_version" ), ) return [log1, log2, log3, log4, log5, log6, log7, log8, log9] def get_test_logs( self, ) -> Tuple[List[ReadWriteLogRecord], ExportLogsServiceRequest]: sdk_logs = self._get_sdk_log_data() pb2_service_request = ExportLogsServiceRequest( resource_logs=[ PB2ResourceLogs( resource=PB2Resource( attributes=[ PB2KeyValue( key="first_resource", value=PB2AnyValue(string_value="value"), ) ] ), scope_logs=[ PB2ScopeLogs( scope=PB2InstrumentationScope( name="first_name", version="first_version" ), log_records=[ PB2LogRecord( time_unix_nano=1644650195189786880, observed_time_unix_nano=1644650195189786881, trace_id=_encode_trace_id( 89564621134313219400156819398935297684 ), span_id=_encode_span_id( 1312458408527513268 ), flags=int(TraceFlags(0x01)), severity_text="WARN", severity_number=SeverityNumber.WARN.value, body=_encode_value( "Do not go gentle into that good night. 
Rage, rage against the dying of the light" ), attributes=_encode_attributes( {"a": 1, "b": "c"}, allow_null=True, ), ) ], ), PB2ScopeLogs( scope=PB2InstrumentationScope( name="another_name", version="another_version", ), log_records=[ PB2LogRecord( time_unix_nano=1644650584292683008, observed_time_unix_nano=1644650584292683009, trace_id=_encode_trace_id( 212592107417388365804938480559624925555 ), span_id=_encode_span_id( 6077757853989569223 ), flags=int(TraceFlags(0x01)), severity_text="INFO", severity_number=SeverityNumber.INFO.value, body=_encode_value( "Love is the one thing that transcends time and space" ), attributes=_encode_attributes( { "filename": "model.py", "func_name": "run_method", }, allow_null=True, ), ) ], ), PB2ScopeLogs( scope=PB2InstrumentationScope( name="scope_with_url", version="scope_with_url_version", ), schema_url="instrumentation_schema_url", log_records=[ PB2LogRecord( time_unix_nano=1644650584292683022, observed_time_unix_nano=1644650584292683022, trace_id=_encode_trace_id( 212592107417388365804938480559624925522 ), span_id=_encode_span_id( 6077757853989569222 ), flags=int(TraceFlags(0x01)), severity_text="ERROR", severity_number=SeverityNumber.ERROR.value, body=_encode_value( "This instrumentation scope has a schema url" ), attributes=_encode_attributes( { "filename": "model.py", "func_name": "run_method", }, allow_null=True, ), ) ], ), PB2ScopeLogs( scope=PB2InstrumentationScope( name="scope_with_attributes", version="scope_with_attributes_version", attributes=_encode_attributes( {"one": 1, "two": "2"}, allow_null=True, ), ), schema_url="instrumentation_schema_url", log_records=[ PB2LogRecord( time_unix_nano=1644650584292683033, observed_time_unix_nano=1644650584292683033, trace_id=_encode_trace_id( 212592107417388365804938480559624925533 ), span_id=_encode_span_id( 6077757853989569233 ), flags=int(TraceFlags(0x01)), severity_text="FATAL", severity_number=SeverityNumber.FATAL.value, body=_encode_value( "This instrumentation scope has a schema url and attributes" ), attributes=_encode_attributes( { "filename": "model.py", "func_name": "run_method", }, allow_null=True, ), ) ], ), ], schema_url="resource_schema_url", ), PB2ResourceLogs( resource=PB2Resource( attributes=[ PB2KeyValue( key="second_resource", value=PB2AnyValue(string_value="CASE"), ) ] ), scope_logs=[ PB2ScopeLogs( scope=PB2InstrumentationScope( name="second_name", version="second_version", ), log_records=[ PB2LogRecord( time_unix_nano=1644650249738562048, observed_time_unix_nano=1644650249738562049, trace_id=None, span_id=None, flags=int(TraceFlags.DEFAULT), severity_text="WARN", severity_number=SeverityNumber.WARN.value, body=_encode_value( "Cooper, this is no time for caution!" 
), attributes={}, ), ], ), PB2ScopeLogs( scope=PB2InstrumentationScope(), log_records=[ PB2LogRecord( time_unix_nano=1644650427658989056, observed_time_unix_nano=1644650427658989057, trace_id=_encode_trace_id( 271615924622795969659406376515024083555 ), span_id=_encode_span_id( 4242561578944770265 ), flags=int(TraceFlags(0x01)), severity_text="DEBUG", severity_number=SeverityNumber.DEBUG.value, body=_encode_value("To our galaxy"), attributes=_encode_attributes( {"a": 1, "b": "c"}, allow_null=True, ), ), ], ), ], ), PB2ResourceLogs( resource=PB2Resource(), scope_logs=[ PB2ScopeLogs( scope=PB2InstrumentationScope( name="last_name", version="last_version", ), log_records=[ PB2LogRecord( time_unix_nano=1644650584292683009, observed_time_unix_nano=1644650584292683010, trace_id=_encode_trace_id( 212592107417388365804938480559624925555 ), span_id=_encode_span_id( 6077757853989569445, ), flags=int(TraceFlags(0x01)), severity_text="INFO", severity_number=SeverityNumber.INFO.value, body=PB2AnyValue( kvlist_value=PB2KeyValueList( values=[ PB2KeyValue(key="error"), PB2KeyValue( key="array_with_nones", value=PB2AnyValue( array_value=PB2ArrayValue( values=[ PB2AnyValue( int_value=1 ), PB2AnyValue(), PB2AnyValue( int_value=2 ), ] ) ), ), ] ) ), attributes={}, ), ], ), PB2ScopeLogs( scope=PB2InstrumentationScope( name="extended_name", version="extended_version", ), log_records=[ PB2LogRecord( time_unix_nano=1644650584292683044, observed_time_unix_nano=1644650584292683044, trace_id=_encode_trace_id( 212592107417388365804938480559624925566 ), span_id=_encode_span_id( 6077757853989569466, ), flags=int(TraceFlags(0x01)), severity_text="INFO", severity_number=SeverityNumber.INFO.value, body=_encode_value( "Test export of extended attributes" ), attributes=_encode_attributes( { "extended": { "sequence": [ { "inner": "mapping", "none": None, } ] } }, allow_null=True, ), ), ], ), PB2ScopeLogs( scope=PB2InstrumentationScope( name="empty_log_record_name", version="empty_log_record_version", ), log_records=[ PB2LogRecord( time_unix_nano=None, observed_time_unix_nano=1644650584292683045, trace_id=_encode_trace_id( 212592107417388365804938480559624925566 ), span_id=_encode_span_id( 6077757853989569466, ), flags=int(TraceFlags(0x01)), severity_text=None, severity_number=None, body=None, attributes=None, ), ], ), ], ), ] ) return sdk_logs, pb2_service_request @staticmethod def _get_test_logs_dropped_attributes() -> List[ReadWriteLogRecord]: ctx_log1 = set_span_in_context( NonRecordingSpan( SpanContext( 89564621134313219400156819398935297684, 1312458408527513268, False, TraceFlags(0x01), ) ) ) log1 = ReadWriteLogRecord( LogRecord( timestamp=1644650195189786880, context=ctx_log1, severity_text="WARN", severity_number=SeverityNumber.WARN, body="Do not go gentle into that good night. 
Rage, rage against the dying of the light", attributes={"a": 1, "b": "c", "user_id": "B121092"}, ), resource=SDKResource({"first_resource": "value"}), limits=LogRecordLimits(max_attributes=1), instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), ) ctx_log2 = set_span_in_context( NonRecordingSpan(SpanContext(0, 0, False)) ) log2 = ReadWriteLogRecord( LogRecord( timestamp=1644650249738562048, context=ctx_log2, severity_text="WARN", severity_number=SeverityNumber.WARN, body="Cooper, this is no time for caution!", attributes={}, ), resource=SDKResource({"second_resource": "CASE"}), instrumentation_scope=InstrumentationScope( "second_name", "second_version" ), ) return [log1, log2] test_metrics_encoder.py000066400000000000000000001553131511654350100357070ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=protected-access,too-many-lines import unittest from opentelemetry.exporter.otlp.proto.common._internal.metrics_encoder import ( EncodingException, ) from opentelemetry.exporter.otlp.proto.common.metrics_encoder import ( encode_metrics, ) from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ( ExportMetricsServiceRequest, ) from opentelemetry.proto.common.v1.common_pb2 import ( AnyValue, InstrumentationScope, KeyValue, ) from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2 from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as OTLPResource, ) from opentelemetry.sdk.metrics import Exemplar from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Buckets, ExponentialHistogramDataPoint, HistogramDataPoint, Metric, MetricsData, ResourceMetrics, ScopeMetrics, ) from opentelemetry.sdk.metrics.export import ( ExponentialHistogram as ExponentialHistogramType, ) from opentelemetry.sdk.metrics.export import Histogram as HistogramType from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.util.instrumentation import ( InstrumentationScope as SDKInstrumentationScope, ) from opentelemetry.test.metrictestutil import _generate_gauge, _generate_sum class TestOTLPMetricsEncoder(unittest.TestCase): span_id = int("6e0c63257de34c92", 16) trace_id = int("d4cda95b652f4a1592b449d5929fda1b", 16) histogram = Metric( name="histogram", description="foo", unit="s", data=HistogramType( data_points=[ HistogramDataPoint( attributes={"a": 1, "b": True}, start_time_unix_nano=1641946016139533244, time_unix_nano=1641946016139533244, exemplars=[ Exemplar( {"filtered": "banana"}, 298.0, 1641946016139533400, span_id, trace_id, ), Exemplar( {"filtered": "banana"}, 298.0, 1641946016139533400, None, None, ), ], count=5, sum=67, bucket_counts=[1, 4], explicit_bounds=[10.0, 20.0], min=8, max=18, ) ], aggregation_temporality=AggregationTemporality.DELTA, ), ) def test_encode_sum_int(self): metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes={"a": 1, 
"b": False}, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="instrumentation_scope_schema_url", ), metrics=[_generate_sum("sum_int", 33)], schema_url="instrumentation_scope_schema_url", ) ], schema_url="resource_schema_url", ) ] ) expected = ExportMetricsServiceRequest( resource_metrics=[ pb2.ResourceMetrics( schema_url="resource_schema_url", resource=OTLPResource( attributes=[ KeyValue(key="a", value=AnyValue(int_value=1)), KeyValue( key="b", value=AnyValue(bool_value=False) ), ] ), scope_metrics=[ pb2.ScopeMetrics( scope=InstrumentationScope( name="first_name", version="first_version" ), schema_url="instrumentation_scope_schema_url", metrics=[ pb2.Metric( name="sum_int", unit="s", description="foo", sum=pb2.Sum( data_points=[ pb2.NumberDataPoint( attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=True ), ), ], start_time_unix_nano=1641946015139533244, time_unix_nano=1641946016139533244, as_int=33, ) ], aggregation_temporality=AggregationTemporality.CUMULATIVE, is_monotonic=True, ), ) ], ) ], ) ] ) actual = encode_metrics(metrics_data) self.assertEqual(expected, actual) def test_encode_sum_double(self): metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes={"a": 1, "b": False}, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="instrumentation_scope_schema_url", ), metrics=[_generate_sum("sum_double", 2.98)], schema_url="instrumentation_scope_schema_url", ) ], schema_url="resource_schema_url", ) ] ) expected = ExportMetricsServiceRequest( resource_metrics=[ pb2.ResourceMetrics( schema_url="resource_schema_url", resource=OTLPResource( attributes=[ KeyValue(key="a", value=AnyValue(int_value=1)), KeyValue( key="b", value=AnyValue(bool_value=False) ), ] ), scope_metrics=[ pb2.ScopeMetrics( scope=InstrumentationScope( name="first_name", version="first_version" ), schema_url="instrumentation_scope_schema_url", metrics=[ pb2.Metric( name="sum_double", unit="s", description="foo", sum=pb2.Sum( data_points=[ pb2.NumberDataPoint( attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=True ), ), ], start_time_unix_nano=1641946015139533244, time_unix_nano=1641946016139533244, as_double=2.98, ) ], aggregation_temporality=AggregationTemporality.CUMULATIVE, is_monotonic=True, ), ) ], ) ], ) ] ) actual = encode_metrics(metrics_data) self.assertEqual(expected, actual) def test_encode_gauge_int(self): metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes={"a": 1, "b": False}, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="instrumentation_scope_schema_url", ), metrics=[_generate_gauge("gauge_int", 9000)], schema_url="instrumentation_scope_schema_url", ) ], schema_url="resource_schema_url", ) ] ) expected = ExportMetricsServiceRequest( resource_metrics=[ pb2.ResourceMetrics( schema_url="resource_schema_url", resource=OTLPResource( attributes=[ KeyValue(key="a", value=AnyValue(int_value=1)), KeyValue( key="b", value=AnyValue(bool_value=False) ), ] ), scope_metrics=[ pb2.ScopeMetrics( scope=InstrumentationScope( name="first_name", version="first_version" ), schema_url="instrumentation_scope_schema_url", 
metrics=[ pb2.Metric( name="gauge_int", unit="s", description="foo", gauge=pb2.Gauge( data_points=[ pb2.NumberDataPoint( attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=True ), ), ], time_unix_nano=1641946016139533244, start_time_unix_nano=0, as_int=9000, ) ], ), ) ], ) ], ) ] ) actual = encode_metrics(metrics_data) self.assertEqual(expected, actual) def test_encode_gauge_double(self): metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes={"a": 1, "b": False}, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="instrumentation_scope_schema_url", ), metrics=[_generate_gauge("gauge_double", 52.028)], schema_url="instrumentation_scope_schema_url", ) ], schema_url="resource_schema_url", ) ] ) expected = ExportMetricsServiceRequest( resource_metrics=[ pb2.ResourceMetrics( schema_url="resource_schema_url", resource=OTLPResource( attributes=[ KeyValue(key="a", value=AnyValue(int_value=1)), KeyValue( key="b", value=AnyValue(bool_value=False) ), ] ), scope_metrics=[ pb2.ScopeMetrics( scope=InstrumentationScope( name="first_name", version="first_version" ), schema_url="instrumentation_scope_schema_url", metrics=[ pb2.Metric( name="gauge_double", unit="s", description="foo", gauge=pb2.Gauge( data_points=[ pb2.NumberDataPoint( attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=True ), ), ], time_unix_nano=1641946016139533244, as_double=52.028, ) ], ), ) ], ) ], ) ] ) actual = encode_metrics(metrics_data) self.assertEqual(expected, actual) def test_encode_histogram(self): metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes={"a": 1, "b": False}, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="instrumentation_scope_schema_url", ), metrics=[self.histogram], schema_url="instrumentation_scope_schema_url", ) ], schema_url="resource_schema_url", ) ] ) expected = ExportMetricsServiceRequest( resource_metrics=[ pb2.ResourceMetrics( schema_url="resource_schema_url", resource=OTLPResource( attributes=[ KeyValue(key="a", value=AnyValue(int_value=1)), KeyValue( key="b", value=AnyValue(bool_value=False) ), ] ), scope_metrics=[ pb2.ScopeMetrics( scope=InstrumentationScope( name="first_name", version="first_version" ), schema_url="instrumentation_scope_schema_url", metrics=[ pb2.Metric( name="histogram", unit="s", description="foo", histogram=pb2.Histogram( data_points=[ pb2.HistogramDataPoint( attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=True ), ), ], start_time_unix_nano=1641946016139533244, time_unix_nano=1641946016139533244, count=5, sum=67, bucket_counts=[1, 4], explicit_bounds=[10.0, 20.0], exemplars=[ pb2.Exemplar( time_unix_nano=1641946016139533400, as_double=298, span_id=b"n\x0cc%}\xe3L\x92", trace_id=b"\xd4\xcd\xa9[e/J\x15\x92\xb4I\xd5\x92\x9f\xda\x1b", filtered_attributes=[ KeyValue( key="filtered", value=AnyValue( string_value="banana" ), ) ], ), pb2.Exemplar( time_unix_nano=1641946016139533400, as_double=298, filtered_attributes=[ KeyValue( key="filtered", value=AnyValue( string_value="banana" ), ) ], ), ], max=18.0, min=8.0, ) ], aggregation_temporality=AggregationTemporality.DELTA, ), ) ], ) ], ) ] ) actual = 
encode_metrics(metrics_data) self.assertEqual(expected, actual) def test_encode_multiple_scope_histogram(self): metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes={"a": 1, "b": False}, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="instrumentation_scope_schema_url", ), metrics=[self.histogram, self.histogram], schema_url="instrumentation_scope_schema_url", ), ScopeMetrics( scope=SDKInstrumentationScope( name="second_name", version="second_version", schema_url="instrumentation_scope_schema_url", ), metrics=[self.histogram], schema_url="instrumentation_scope_schema_url", ), ScopeMetrics( scope=SDKInstrumentationScope( name="third_name", version="third_version", schema_url="instrumentation_scope_schema_url", ), metrics=[self.histogram], schema_url="instrumentation_scope_schema_url", ), ], schema_url="resource_schema_url", ) ] ) expected = ExportMetricsServiceRequest( resource_metrics=[ pb2.ResourceMetrics( schema_url="resource_schema_url", resource=OTLPResource( attributes=[ KeyValue(key="a", value=AnyValue(int_value=1)), KeyValue( key="b", value=AnyValue(bool_value=False) ), ] ), scope_metrics=[ pb2.ScopeMetrics( scope=InstrumentationScope( name="first_name", version="first_version" ), schema_url="instrumentation_scope_schema_url", metrics=[ pb2.Metric( name="histogram", unit="s", description="foo", histogram=pb2.Histogram( data_points=[ pb2.HistogramDataPoint( attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=True ), ), ], start_time_unix_nano=1641946016139533244, time_unix_nano=1641946016139533244, count=5, sum=67, bucket_counts=[1, 4], explicit_bounds=[10.0, 20.0], exemplars=[ pb2.Exemplar( time_unix_nano=1641946016139533400, as_double=298, span_id=b"n\x0cc%}\xe3L\x92", trace_id=b"\xd4\xcd\xa9[e/J\x15\x92\xb4I\xd5\x92\x9f\xda\x1b", filtered_attributes=[ KeyValue( key="filtered", value=AnyValue( string_value="banana" ), ) ], ), pb2.Exemplar( time_unix_nano=1641946016139533400, as_double=298, filtered_attributes=[ KeyValue( key="filtered", value=AnyValue( string_value="banana" ), ) ], ), ], max=18.0, min=8.0, ) ], aggregation_temporality=AggregationTemporality.DELTA, ), ), pb2.Metric( name="histogram", unit="s", description="foo", histogram=pb2.Histogram( data_points=[ pb2.HistogramDataPoint( attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=True ), ), ], start_time_unix_nano=1641946016139533244, time_unix_nano=1641946016139533244, count=5, sum=67, bucket_counts=[1, 4], explicit_bounds=[10.0, 20.0], exemplars=[ pb2.Exemplar( time_unix_nano=1641946016139533400, as_double=298, span_id=b"n\x0cc%}\xe3L\x92", trace_id=b"\xd4\xcd\xa9[e/J\x15\x92\xb4I\xd5\x92\x9f\xda\x1b", filtered_attributes=[ KeyValue( key="filtered", value=AnyValue( string_value="banana" ), ) ], ), pb2.Exemplar( time_unix_nano=1641946016139533400, as_double=298, filtered_attributes=[ KeyValue( key="filtered", value=AnyValue( string_value="banana" ), ) ], ), ], max=18.0, min=8.0, ) ], aggregation_temporality=AggregationTemporality.DELTA, ), ), ], ), pb2.ScopeMetrics( scope=InstrumentationScope( name="second_name", version="second_version" ), schema_url="instrumentation_scope_schema_url", metrics=[ pb2.Metric( name="histogram", unit="s", description="foo", histogram=pb2.Histogram( data_points=[ pb2.HistogramDataPoint( attributes=[ KeyValue( key="a", 
value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=True ), ), ], start_time_unix_nano=1641946016139533244, time_unix_nano=1641946016139533244, count=5, sum=67, bucket_counts=[1, 4], explicit_bounds=[10.0, 20.0], exemplars=[ pb2.Exemplar( time_unix_nano=1641946016139533400, as_double=298, span_id=b"n\x0cc%}\xe3L\x92", trace_id=b"\xd4\xcd\xa9[e/J\x15\x92\xb4I\xd5\x92\x9f\xda\x1b", filtered_attributes=[ KeyValue( key="filtered", value=AnyValue( string_value="banana" ), ) ], ), pb2.Exemplar( time_unix_nano=1641946016139533400, as_double=298, filtered_attributes=[ KeyValue( key="filtered", value=AnyValue( string_value="banana" ), ) ], ), ], max=18.0, min=8.0, ) ], aggregation_temporality=AggregationTemporality.DELTA, ), ) ], ), pb2.ScopeMetrics( scope=InstrumentationScope( name="third_name", version="third_version" ), schema_url="instrumentation_scope_schema_url", metrics=[ pb2.Metric( name="histogram", unit="s", description="foo", histogram=pb2.Histogram( data_points=[ pb2.HistogramDataPoint( attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=True ), ), ], start_time_unix_nano=1641946016139533244, time_unix_nano=1641946016139533244, count=5, sum=67, bucket_counts=[1, 4], explicit_bounds=[10.0, 20.0], exemplars=[ pb2.Exemplar( time_unix_nano=1641946016139533400, as_double=298, span_id=b"n\x0cc%}\xe3L\x92", trace_id=b"\xd4\xcd\xa9[e/J\x15\x92\xb4I\xd5\x92\x9f\xda\x1b", filtered_attributes=[ KeyValue( key="filtered", value=AnyValue( string_value="banana" ), ) ], ), pb2.Exemplar( time_unix_nano=1641946016139533400, as_double=298, filtered_attributes=[ KeyValue( key="filtered", value=AnyValue( string_value="banana" ), ) ], ), ], max=18.0, min=8.0, ) ], aggregation_temporality=AggregationTemporality.DELTA, ), ) ], ), ], ) ] ) actual = encode_metrics(metrics_data) self.assertEqual(expected, actual) def test_encode_exponential_histogram(self): exponential_histogram = Metric( name="exponential_histogram", description="description", unit="unit", data=ExponentialHistogramType( data_points=[ ExponentialHistogramDataPoint( attributes={"a": 1, "b": True}, start_time_unix_nano=0, time_unix_nano=1, count=2, sum=3, scale=4, zero_count=5, positive=Buckets(offset=6, bucket_counts=[7, 8]), negative=Buckets(offset=9, bucket_counts=[10, 11]), flags=12, min=13.0, max=14.0, ) ], aggregation_temporality=AggregationTemporality.DELTA, ), ) metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes={"a": 1, "b": False}, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="instrumentation_scope_schema_url", ), metrics=[exponential_histogram], schema_url="instrumentation_scope_schema_url", ) ], schema_url="resource_schema_url", ) ] ) expected = ExportMetricsServiceRequest( resource_metrics=[ pb2.ResourceMetrics( schema_url="resource_schema_url", resource=OTLPResource( attributes=[ KeyValue(key="a", value=AnyValue(int_value=1)), KeyValue( key="b", value=AnyValue(bool_value=False) ), ] ), scope_metrics=[ pb2.ScopeMetrics( scope=InstrumentationScope( name="first_name", version="first_version" ), schema_url="instrumentation_scope_schema_url", metrics=[ pb2.Metric( name="exponential_histogram", unit="unit", description="description", exponential_histogram=pb2.ExponentialHistogram( data_points=[ pb2.ExponentialHistogramDataPoint( attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( 
key="b", value=AnyValue( bool_value=True ), ), ], start_time_unix_nano=0, time_unix_nano=1, count=2, sum=3, scale=4, zero_count=5, positive=pb2.ExponentialHistogramDataPoint.Buckets( offset=6, bucket_counts=[7, 8], ), negative=pb2.ExponentialHistogramDataPoint.Buckets( offset=9, bucket_counts=[10, 11], ), flags=12, exemplars=[], min=13.0, max=14.0, ) ], aggregation_temporality=AggregationTemporality.DELTA, ), ) ], ) ], ) ] ) # pylint: disable=protected-access actual = encode_metrics(metrics_data) self.assertEqual(expected, actual) def test_encoding_exception_reraise(self): # this number is too big to fit in a signed 64-bit proto field and causes a ValueError big_number = 2**63 metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes={}, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="instrumentation_scope_schema_url", ), metrics=[_generate_sum("sum_double", big_number)], schema_url="instrumentation_scope_schema_url", ) ], schema_url="resource_schema_url", ) ] ) with self.assertRaises(EncodingException) as context: encode_metrics(metrics_data) # assert that the EncodingException wraps the metric and original exception assert isinstance(context.exception.metric, Metric) assert isinstance(context.exception.original_exception, ValueError) def test_encode_scope_with_attributes(self): metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes=None, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="instrumentation_scope_schema_url", attributes={"one": 1, "two": "2"}, ), metrics=[_generate_sum("sum_int", 88)], schema_url="instrumentation_scope_schema_url", ) ], schema_url="resource_schema_url", ) ] ) expected = ExportMetricsServiceRequest( resource_metrics=[ pb2.ResourceMetrics( schema_url="resource_schema_url", resource=OTLPResource(), scope_metrics=[ pb2.ScopeMetrics( scope=InstrumentationScope( name="first_name", version="first_version", attributes=[ KeyValue( key="one", value=AnyValue(int_value=1) ), KeyValue( key="two", value=AnyValue(string_value="2"), ), ], ), schema_url="instrumentation_scope_schema_url", metrics=[ pb2.Metric( name="sum_int", unit="s", description="foo", sum=pb2.Sum( data_points=[ pb2.NumberDataPoint( attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=True ), ), ], start_time_unix_nano=1641946015139533244, time_unix_nano=1641946016139533244, as_int=88, ) ], aggregation_temporality=AggregationTemporality.CUMULATIVE, is_monotonic=True, ), ) ], ) ], ) ] ) actual = encode_metrics(metrics_data) self.assertEqual(expected, actual) test_trace_encoder.py000066400000000000000000000532271511654350100353400ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-common/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=protected-access import unittest from typing import List, Tuple from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_span_id, _encode_trace_id, ) from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import ( _SPAN_KIND_MAP, _encode_status, ) from opentelemetry.exporter.otlp.proto.common.trace_encoder import encode_spans from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ( ExportTraceServiceRequest as PB2ExportTraceServiceRequest, ) from opentelemetry.proto.common.v1.common_pb2 import AnyValue as PB2AnyValue from opentelemetry.proto.common.v1.common_pb2 import ( InstrumentationScope as PB2InstrumentationScope, ) from opentelemetry.proto.common.v1.common_pb2 import KeyValue as PB2KeyValue from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as PB2Resource, ) from opentelemetry.proto.trace.v1.trace_pb2 import ( ResourceSpans as PB2ResourceSpans, ) from opentelemetry.proto.trace.v1.trace_pb2 import ScopeSpans as PB2ScopeSpans from opentelemetry.proto.trace.v1.trace_pb2 import Span as PB2SPan from opentelemetry.proto.trace.v1.trace_pb2 import Status as PB2Status from opentelemetry.sdk.trace import Event as SDKEvent from opentelemetry.sdk.trace import Resource as SDKResource from opentelemetry.sdk.trace import SpanContext as SDKSpanContext from opentelemetry.sdk.trace import _Span as SDKSpan from opentelemetry.sdk.util.instrumentation import ( InstrumentationScope as SDKInstrumentationScope, ) from opentelemetry.trace import Link as SDKLink from opentelemetry.trace import SpanKind as SDKSpanKind from opentelemetry.trace import TraceFlags as SDKTraceFlags from opentelemetry.trace.status import Status as SDKStatus from opentelemetry.trace.status import StatusCode as SDKStatusCode class TestOTLPTraceEncoder(unittest.TestCase): def test_encode_spans(self): otel_spans, expected_encoding = self.get_exhaustive_test_spans() self.assertEqual(encode_spans(otel_spans), expected_encoding) @staticmethod def get_exhaustive_otel_span_list() -> List[SDKSpan]: trace_id = 0x3E0C63257DE34C926F9EFCD03927272E base_time = 683647322 * 10**9 # in ns start_times = ( base_time, base_time + 150 * 10**6, base_time + 300 * 10**6, base_time + 400 * 10**6, base_time + 500 * 10**6, base_time + 600 * 10**6, ) end_times = ( start_times[0] + (50 * 10**6), start_times[1] + (100 * 10**6), start_times[2] + (200 * 10**6), start_times[3] + (300 * 10**6), start_times[4] + (400 * 10**6), start_times[5] + (500 * 10**6), ) parent_span_context = SDKSpanContext( trace_id, 0x1111111111111111, is_remote=True ) other_context = SDKSpanContext( trace_id, 0x2222222222222222, is_remote=False ) span1 = SDKSpan( name="test-span-1", context=SDKSpanContext( trace_id, 0x34BF92DEEFC58C92, is_remote=False, trace_flags=SDKTraceFlags(SDKTraceFlags.SAMPLED), ), parent=parent_span_context, events=( SDKEvent( name="event0", timestamp=base_time + 50 * 10**6, attributes={ "annotation_bool": True, "annotation_string": "annotation_test", "key_float": 0.3, }, ), ), links=( SDKLink(context=other_context, attributes={"key_bool": True}), ), resource=SDKResource({}, "resource_schema_url"), ) span1.start(start_time=start_times[0]) span1.set_attribute("key_bool", False) span1.set_attribute("key_string", "hello_world") span1.set_attribute("key_float", 111.22) span1.set_status(SDKStatus(SDKStatusCode.ERROR, "Example description")) span1.end(end_time=end_times[0]) span2 = SDKSpan( 
name="test-span-2", context=parent_span_context, parent=None, resource=SDKResource(attributes={"key_resource": "some_resource"}), ) span2.start(start_time=start_times[1]) span2.end(end_time=end_times[1]) span3 = SDKSpan( name="test-span-3", context=other_context, parent=None, resource=SDKResource(attributes={"key_resource": "some_resource"}), ) span3.start(start_time=start_times[2]) span3.set_attribute("key_string", "hello_world") span3.end(end_time=end_times[2]) span4 = SDKSpan( name="test-span-4", context=other_context, parent=None, resource=SDKResource({}, "resource_schema_url"), instrumentation_scope=SDKInstrumentationScope( name="name", version="version" ), ) span4.start(start_time=start_times[3]) span4.end(end_time=end_times[3]) span5 = SDKSpan( name="test-span-5", context=other_context, parent=None, resource=SDKResource( attributes={"key_resource": "another_resource"}, schema_url="resource_schema_url", ), instrumentation_scope=SDKInstrumentationScope( name="scope_1_name", version="scope_1_version", schema_url="scope_1_schema_url", ), ) span5.start(start_time=start_times[4]) span5.end(end_time=end_times[4]) span6 = SDKSpan( name="test-span-6", context=other_context, parent=None, resource=SDKResource( attributes={"key_resource": "another_resource"}, schema_url="resource_schema_url", ), instrumentation_scope=SDKInstrumentationScope( name="scope_2_name", version="scope_2_version", schema_url="scope_2_schema_url", attributes={"one": "1", "two": 2}, ), ) span6.start(start_time=start_times[5]) span6.end(end_time=end_times[5]) return [span1, span2, span3, span4, span5, span6] def get_exhaustive_test_spans( self, ) -> Tuple[List[SDKSpan], PB2ExportTraceServiceRequest]: otel_spans = self.get_exhaustive_otel_span_list() trace_id = _encode_trace_id(otel_spans[0].context.trace_id) span_kind = _SPAN_KIND_MAP[SDKSpanKind.INTERNAL] pb2_service_request = PB2ExportTraceServiceRequest( resource_spans=[ PB2ResourceSpans( schema_url="resource_schema_url", resource=PB2Resource(), scope_spans=[ PB2ScopeSpans( scope=PB2InstrumentationScope(), spans=[ PB2SPan( trace_id=trace_id, span_id=_encode_span_id( otel_spans[0].context.span_id ), trace_state=None, parent_span_id=_encode_span_id( otel_spans[0].parent.span_id ), name=otel_spans[0].name, kind=span_kind, start_time_unix_nano=otel_spans[ 0 ].start_time, end_time_unix_nano=otel_spans[0].end_time, attributes=[ PB2KeyValue( key="key_bool", value=PB2AnyValue( bool_value=False ), ), PB2KeyValue( key="key_string", value=PB2AnyValue( string_value="hello_world" ), ), PB2KeyValue( key="key_float", value=PB2AnyValue( double_value=111.22 ), ), ], events=[ PB2SPan.Event( name="event0", time_unix_nano=otel_spans[0] .events[0] .timestamp, attributes=[ PB2KeyValue( key="annotation_bool", value=PB2AnyValue( bool_value=True ), ), PB2KeyValue( key="annotation_string", value=PB2AnyValue( string_value="annotation_test" ), ), PB2KeyValue( key="key_float", value=PB2AnyValue( double_value=0.3 ), ), ], ) ], links=[ PB2SPan.Link( trace_id=_encode_trace_id( otel_spans[0] .links[0] .context.trace_id ), span_id=_encode_span_id( otel_spans[0] .links[0] .context.span_id ), attributes=[ PB2KeyValue( key="key_bool", value=PB2AnyValue( bool_value=True ), ), ], flags=0x100, ) ], status=PB2Status( code=SDKStatusCode.ERROR.value, message="Example description", ), flags=0x300, ) ], ), PB2ScopeSpans( scope=PB2InstrumentationScope( name="name", version="version", ), spans=[ PB2SPan( trace_id=trace_id, span_id=_encode_span_id( otel_spans[3].context.span_id ), trace_state=None, 
parent_span_id=None, name=otel_spans[3].name, kind=span_kind, start_time_unix_nano=otel_spans[ 3 ].start_time, end_time_unix_nano=otel_spans[3].end_time, attributes=None, events=None, links=None, status={}, flags=0x100, ) ], ), ], ), PB2ResourceSpans( resource=PB2Resource( attributes=[ PB2KeyValue( key="key_resource", value=PB2AnyValue( string_value="some_resource" ), ) ] ), scope_spans=[ PB2ScopeSpans( scope=PB2InstrumentationScope(), spans=[ PB2SPan( trace_id=trace_id, span_id=_encode_span_id( otel_spans[1].context.span_id ), trace_state=None, parent_span_id=None, name=otel_spans[1].name, kind=span_kind, start_time_unix_nano=otel_spans[ 1 ].start_time, end_time_unix_nano=otel_spans[1].end_time, attributes=None, events=None, links=None, status={}, flags=0x100, ), PB2SPan( trace_id=trace_id, span_id=_encode_span_id( otel_spans[2].context.span_id ), trace_state=None, parent_span_id=None, name=otel_spans[2].name, kind=span_kind, start_time_unix_nano=otel_spans[ 2 ].start_time, end_time_unix_nano=otel_spans[2].end_time, attributes=[ PB2KeyValue( key="key_string", value=PB2AnyValue( string_value="hello_world" ), ), ], events=None, links=None, status={}, flags=0x100, ), ], ) ], ), PB2ResourceSpans( resource=PB2Resource( attributes=[ PB2KeyValue( key="key_resource", value=PB2AnyValue( string_value="another_resource" ), ), ], ), schema_url="resource_schema_url", scope_spans=[ PB2ScopeSpans( scope=PB2InstrumentationScope( name="scope_1_name", version="scope_1_version" ), schema_url="scope_1_schema_url", spans=[ PB2SPan( trace_id=trace_id, span_id=_encode_span_id( otel_spans[4].context.span_id ), trace_state=None, parent_span_id=None, name=otel_spans[4].name, kind=span_kind, start_time_unix_nano=otel_spans[ 4 ].start_time, end_time_unix_nano=otel_spans[4].end_time, attributes=None, events=None, links=None, status={}, flags=0x100, ), ], ), PB2ScopeSpans( scope=PB2InstrumentationScope( name="scope_2_name", version="scope_2_version", attributes=[ PB2KeyValue( key="one", value=PB2AnyValue(string_value="1"), ), PB2KeyValue( key="two", value=PB2AnyValue(int_value=2), ), ], ), schema_url="scope_2_schema_url", spans=[ PB2SPan( trace_id=trace_id, span_id=_encode_span_id( otel_spans[5].context.span_id ), trace_state=None, parent_span_id=None, name=otel_spans[5].name, kind=span_kind, start_time_unix_nano=otel_spans[ 5 ].start_time, end_time_unix_nano=otel_spans[5].end_time, attributes=None, events=None, links=None, status={}, flags=0x100, ), ], ), ], ), ] ) return otel_spans, pb2_service_request def test_encode_status_code_translations(self): self.assertEqual( _encode_status(SDKStatus(status_code=SDKStatusCode.UNSET)), PB2Status( code=SDKStatusCode.UNSET.value, ), ) self.assertEqual( _encode_status(SDKStatus(status_code=SDKStatusCode.OK)), PB2Status( code=SDKStatusCode.OK.value, ), ) self.assertEqual( _encode_status(SDKStatus(status_code=SDKStatusCode.ERROR)), PB2Status( code=SDKStatusCode.ERROR.value, ), ) python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/000077500000000000000000000000001511654350100275015ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/LICENSE000066400000000000000000000261351511654350100305150ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/README.rst000066400000000000000000000016041511654350100311710ustar00rootroot00000000000000OpenTelemetry Collector Protobuf over gRPC Exporter =================================================== |pypi| .. |pypi| image:: https://badge.fury.io/py/opentelemetry-exporter-otlp-proto-grpc.svg :target: https://pypi.org/project/opentelemetry-exporter-otlp-proto-grpc/ This library allows exporting data to the OpenTelemetry Collector using the OpenTelemetry Protocol (OTLP) with Protobuf over gRPC. Installation ------------ :: pip install opentelemetry-exporter-otlp-proto-grpc References ---------- * `OpenTelemetry Collector Exporter `_ * `OpenTelemetry Collector `_ * `OpenTelemetry `_ * `OpenTelemetry Protocol Specification `_ benchmark-requirements.txt000066400000000000000000000000301511654350100346300ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpcpytest-benchmark==4.0.0 python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmarks/000077500000000000000000000000001511654350100316165ustar00rootroot00000000000000test_benchmark_trace_exporter.py000066400000000000000000000053561511654350100402170ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmarks# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=invalid-name from unittest.mock import patch from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.sdk.trace import TracerProvider, sampling from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, SimpleSpanProcessor, ) def get_tracer_with_processor(span_processor_class): span_processor = span_processor_class(OTLPSpanExporter()) tracer = TracerProvider( active_span_processor=span_processor, sampler=sampling.DEFAULT_ON, ).get_tracer("pipeline_benchmark_tracer") return tracer class MockTraceServiceStub: def __init__(self, channel): self.Export = lambda *args, **kwargs: None @patch( "opentelemetry.exporter.otlp.proto.grpc.trace_exporter.OTLPSpanExporter._stub", new=MockTraceServiceStub, ) def test_simple_span_processor(benchmark): tracer = get_tracer_with_processor(SimpleSpanProcessor) def create_spans_to_be_exported(): span = tracer.start_span( "benchmarkedSpan", ) for i in range(10): span.set_attribute( f"benchmarkAttribute_{i}", f"benchmarkAttrValue_{i}", ) span.end() benchmark(create_spans_to_be_exported) @patch( "opentelemetry.exporter.otlp.proto.grpc.trace_exporter.OTLPSpanExporter._stub", new=MockTraceServiceStub, ) def test_batch_span_processor(benchmark): """Runs benchmark tests using BatchSpanProcessor. One particular call by pytest-benchmark will be much more expensive since the batch export thread will activate and consume a lot of CPU to process all the spans. For this reason, focus on the average measurement. Do not focus on the min/max measurements, which can be misleading.
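To run only this benchmark locally, something like the following should work (assuming the pytest-benchmark version pinned in benchmark-requirements.txt is installed): pytest benchmarks/test_benchmark_trace_exporter.py -k test_batch_span_processor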
""" tracer = get_tracer_with_processor(BatchSpanProcessor) def create_spans_to_be_exported(): span = tracer.start_span( "benchmarkedSpan", ) for i in range(10): span.set_attribute( f"benchmarkAttribute_{i}", f"benchmarkAttrValue_{i}", ) span.end() benchmark(create_spans_to_be_exported) python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/pyproject.toml000066400000000000000000000042611511654350100324200ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-exporter-otlp-proto-grpc" dynamic = ["version"] description = "OpenTelemetry Collector Protobuf over gRPC Exporter" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Framework :: OpenTelemetry :: Exporters", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ] dependencies = [ "googleapis-common-protos ~= 1.57", "grpcio >= 1.63.2, < 2.0.0; python_version < '3.13'", "grpcio >= 1.66.2, < 2.0.0; python_version >= '3.13'", "opentelemetry-api ~= 1.15", "opentelemetry-proto == 1.39.1", "opentelemetry-sdk ~= 1.39.1", "opentelemetry-exporter-otlp-proto-common == 1.39.1", "typing-extensions >= 4.6.0", ] [project.entry-points.opentelemetry_logs_exporter] otlp_proto_grpc = "opentelemetry.exporter.otlp.proto.grpc._log_exporter:OTLPLogExporter" [project.entry-points.opentelemetry_metrics_exporter] otlp_proto_grpc = "opentelemetry.exporter.otlp.proto.grpc.metric_exporter:OTLPMetricExporter" [project.entry-points.opentelemetry_traces_exporter] otlp_proto_grpc = "opentelemetry.exporter.otlp.proto.grpc.trace_exporter:OTLPSpanExporter" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-otlp-proto-grpc" Repository = "https://github.com/open-telemetry/opentelemetry-python" [project.optional-dependencies] gcp-auth = [ "opentelemetry-exporter-credential-provider-gcp >= 0.59b0", ] [tool.hatch.version] path = "src/opentelemetry/exporter/otlp/proto/grpc/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] 
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/000077500000000000000000000000001511654350100302705ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/000077500000000000000000000000001511654350100331645ustar00rootroot00000000000000exporter/000077500000000000000000000000001511654350100347555ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetryotlp/000077500000000000000000000000001511654350100357335ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporterproto/000077500000000000000000000000001511654350100370765ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlpgrpc/000077500000000000000000000000001511654350100400315ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto__init__.py000066400000000000000000000052101511654350100421400ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This library allows exporting tracing data to an OTLP collector. Usage ----- The **OTLP Span Exporter** allows exporting `OpenTelemetry`_ traces to the `OTLP`_ collector. You can configure the exporter with the following environment variables: - :envvar:`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` - :envvar:`OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` - :envvar:`OTEL_EXPORTER_OTLP_TRACES_HEADERS` - :envvar:`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` - :envvar:`OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` - :envvar:`OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` - :envvar:`OTEL_EXPORTER_OTLP_TIMEOUT` - :envvar:`OTEL_EXPORTER_OTLP_PROTOCOL` - :envvar:`OTEL_EXPORTER_OTLP_HEADERS` - :envvar:`OTEL_EXPORTER_OTLP_ENDPOINT` - :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` - :envvar:`OTEL_EXPORTER_OTLP_CERTIFICATE` .. _OTLP: https://github.com/open-telemetry/opentelemetry-collector/ .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ .. code:: python from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor # A Resource may be required by some backends, e.g. 
Jaeger. # If the resource is not set, traces will not appear in Jaeger resource = Resource(attributes={ "service.name": "service" }) trace.set_tracer_provider(TracerProvider(resource=resource)) tracer = trace.get_tracer(__name__) otlp_exporter = OTLPSpanExporter(endpoint="http://localhost:4317", insecure=True) span_processor = BatchSpanProcessor(otlp_exporter) trace.get_tracer_provider().add_span_processor(span_processor) with tracer.start_as_current_span("foo"): print("Hello world!") API --- """ from .version import __version__ _USER_AGENT_HEADER_VALUE = "OTel-OTLP-Exporter-Python/" + __version__ _OTLP_GRPC_CHANNEL_OPTIONS = [ # this will appear in the HTTP User-Agent header ("grpc.primary_user_agent", _USER_AGENT_HEADER_VALUE) ] _log_exporter/000077500000000000000000000000001511654350100427015ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc__init__.py000066400000000000000000000111461511654350100450150ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_log_exporter# Copyright The OpenTelemetry Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from os import environ from typing import Dict, Literal, Optional, Sequence, Tuple, Union from typing import Sequence as TypingSequence from grpc import ChannelCredentials, Compression from opentelemetry.exporter.otlp.proto.common._log_encoder import encode_logs from opentelemetry.exporter.otlp.proto.grpc.exporter import ( OTLPExporterMixin, _get_credentials, environ_to_compression, ) from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import ( ExportLogsServiceRequest, ) from opentelemetry.proto.collector.logs.v1.logs_service_pb2_grpc import ( LogsServiceStub, ) from opentelemetry.sdk._logs import ReadableLogRecord from opentelemetry.sdk._logs.export import ( LogRecordExporter, LogRecordExportResult, ) from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_GRPC_LOGS_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_HEADERS, OTEL_EXPORTER_OTLP_LOGS_INSECURE, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT, ) class OTLPLogExporter( LogRecordExporter, OTLPExporterMixin[ Sequence[ReadableLogRecord], ExportLogsServiceRequest, LogRecordExportResult, LogsServiceStub, ], ): def __init__( self, endpoint: Optional[str] = None, insecure: Optional[bool] = None, credentials: Optional[ChannelCredentials] = None, headers: Optional[ Union[TypingSequence[Tuple[str, str]], Dict[str, str], str] ] = None, timeout: Optional[float] = None, compression: Optional[Compression] = None, channel_options: Optional[Tuple[Tuple[str, str]]] = None, ): insecure_logs = environ.get(OTEL_EXPORTER_OTLP_LOGS_INSECURE) if insecure is None and insecure_logs is not None: insecure = insecure_logs.lower() == 
"true" if ( not insecure and environ.get(OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE) is not None ): credentials = _get_credentials( credentials, _OTEL_PYTHON_EXPORTER_OTLP_GRPC_LOGS_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE, ) environ_timeout = environ.get(OTEL_EXPORTER_OTLP_LOGS_TIMEOUT) environ_timeout = ( float(environ_timeout) if environ_timeout is not None else None ) compression = ( environ_to_compression(OTEL_EXPORTER_OTLP_LOGS_COMPRESSION) if compression is None else compression ) OTLPExporterMixin.__init__( self, endpoint=endpoint or environ.get(OTEL_EXPORTER_OTLP_LOGS_ENDPOINT), insecure=insecure, credentials=credentials, headers=headers or environ.get(OTEL_EXPORTER_OTLP_LOGS_HEADERS), timeout=timeout or environ_timeout, compression=compression, stub=LogsServiceStub, result=LogRecordExportResult, channel_options=channel_options, ) def _translate_data( self, data: Sequence[ReadableLogRecord] ) -> ExportLogsServiceRequest: return encode_logs(data) def export( # type: ignore [reportIncompatibleMethodOverride] self, batch: Sequence[ReadableLogRecord], ) -> Literal[LogRecordExportResult.SUCCESS, LogRecordExportResult.FAILURE]: return OTLPExporterMixin._export(self, batch) def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: OTLPExporterMixin.shutdown(self, timeout_millis=timeout_millis) def force_flush(self, timeout_millis: float = 10_000) -> bool: """Nothing is buffered in this exporter, so this method does nothing.""" return True @property def _exporting(self) -> str: return "logs" exporter.py000066400000000000000000000366221511654350100422640ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""OTLP Exporter""" import random import threading from abc import ABC, abstractmethod from collections.abc import Sequence # noqa: F401 from logging import getLogger from os import environ from time import time from typing import ( # noqa: F401 Any, Callable, Dict, Generic, List, Literal, NewType, Optional, Tuple, Type, TypeVar, Union, ) from typing import Sequence as TypingSequence from urllib.parse import urlparse from google.rpc.error_details_pb2 import RetryInfo from typing_extensions import deprecated from grpc import ( ChannelCredentials, Compression, RpcError, StatusCode, insecure_channel, secure_channel, ssl_channel_credentials, ) from opentelemetry.exporter.otlp.proto.common._internal import ( _get_resource_data, ) from opentelemetry.exporter.otlp.proto.grpc import ( _OTLP_GRPC_CHANNEL_OPTIONS, ) from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import ( ExportLogsServiceRequest, ) from opentelemetry.proto.collector.logs.v1.logs_service_pb2_grpc import ( LogsServiceStub, ) from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ( ExportMetricsServiceRequest, ) from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import ( MetricsServiceStub, ) from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ( ExportTraceServiceRequest, ) from opentelemetry.proto.collector.trace.v1.trace_service_pb2_grpc import ( TraceServiceStub, ) from opentelemetry.proto.common.v1.common_pb2 import ( # noqa: F401 AnyValue, ArrayValue, KeyValue, ) from opentelemetry.proto.resource.v1.resource_pb2 import Resource # noqa: F401 from opentelemetry.sdk._logs import ReadableLogRecord from opentelemetry.sdk._logs.export import LogRecordExportResult from opentelemetry.sdk._shared_internal import DuplicateFilter from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_GRPC_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TIMEOUT, ) from opentelemetry.sdk.metrics.export import MetricExportResult, MetricsData from opentelemetry.sdk.resources import Resource as SDKResource from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.sdk.trace.export import SpanExportResult from opentelemetry.util._importlib_metadata import entry_points from opentelemetry.util.re import parse_env_headers _RETRYABLE_ERROR_CODES = frozenset( [ StatusCode.CANCELLED, StatusCode.DEADLINE_EXCEEDED, StatusCode.RESOURCE_EXHAUSTED, StatusCode.ABORTED, StatusCode.OUT_OF_RANGE, StatusCode.UNAVAILABLE, StatusCode.DATA_LOSS, ] ) _MAX_RETRYS = 6 logger = getLogger(__name__) # This prevents logs generated when a log fails to be written to generate another log which fails to be written etc. etc. 
logger.addFilter(DuplicateFilter()) SDKDataT = TypeVar( "SDKDataT", TypingSequence[ReadableLogRecord], MetricsData, TypingSequence[ReadableSpan], ) ResourceDataT = TypeVar("ResourceDataT") TypingResourceT = TypeVar("TypingResourceT") ExportServiceRequestT = TypeVar( "ExportServiceRequestT", ExportTraceServiceRequest, ExportMetricsServiceRequest, ExportLogsServiceRequest, ) ExportResultT = TypeVar( "ExportResultT", LogRecordExportResult, MetricExportResult, SpanExportResult, ) ExportStubT = TypeVar( "ExportStubT", TraceServiceStub, MetricsServiceStub, LogsServiceStub ) _ENVIRON_TO_COMPRESSION = { None: None, "gzip": Compression.Gzip, } class InvalidCompressionValueException(Exception): def __init__(self, environ_key: str, environ_value: str): super().__init__( 'Invalid value "{}" for compression envvar {}'.format( environ_value, environ_key ) ) def environ_to_compression(environ_key: str) -> Optional[Compression]: environ_value = ( environ[environ_key].lower().strip() if environ_key in environ else None ) if ( environ_value not in _ENVIRON_TO_COMPRESSION and environ_value is not None ): raise InvalidCompressionValueException(environ_key, environ_value) return _ENVIRON_TO_COMPRESSION[environ_value] @deprecated( "Use one of the encoders from opentelemetry-exporter-otlp-proto-common instead. Deprecated since version 1.18.0.", ) def get_resource_data( sdk_resource_scope_data: Dict[SDKResource, ResourceDataT], resource_class: Callable[..., TypingResourceT], name: str, ) -> List[TypingResourceT]: return _get_resource_data(sdk_resource_scope_data, resource_class, name) def _read_file(file_path: str) -> Optional[bytes]: try: with open(file_path, "rb") as file: return file.read() except FileNotFoundError as e: logger.exception( "Failed to read file: %s. Please check if the file exists and is accessible.", e.filename, ) return None def _load_credentials( certificate_file: Optional[str], client_key_file: Optional[str], client_certificate_file: Optional[str], ) -> ChannelCredentials: root_certificates = ( _read_file(certificate_file) if certificate_file else None ) private_key = _read_file(client_key_file) if client_key_file else None certificate_chain = ( _read_file(client_certificate_file) if client_certificate_file else None ) return ssl_channel_credentials( root_certificates=root_certificates, private_key=private_key, certificate_chain=certificate_chain, ) def _get_credentials( creds: Optional[ChannelCredentials], credential_entry_point_env_key: str, certificate_file_env_key: str, client_key_file_env_key: str, client_certificate_file_env_key: str, ) -> ChannelCredentials: if creds is not None: return creds _credential_env = environ.get(credential_entry_point_env_key) if _credential_env: try: maybe_channel_creds = next( iter( entry_points( group="opentelemetry_otlp_credential_provider", name=_credential_env, ) ) ).load()() except StopIteration: raise RuntimeError( f"Requested component '{_credential_env}' not found in " f"entry point 'opentelemetry_otlp_credential_provider'" ) if isinstance(maybe_channel_creds, ChannelCredentials): return maybe_channel_creds else: raise RuntimeError( f"Requested component '{_credential_env}' is of type {type(maybe_channel_creds)}" f" must be of type `grpc.ChannelCredentials`." 
) certificate_file = environ.get(certificate_file_env_key) if certificate_file: client_key_file = environ.get(client_key_file_env_key) client_certificate_file = environ.get(client_certificate_file_env_key) return _load_credentials( certificate_file, client_key_file, client_certificate_file ) return ssl_channel_credentials() # pylint: disable=no-member class OTLPExporterMixin( ABC, Generic[SDKDataT, ExportServiceRequestT, ExportResultT, ExportStubT] ): """OTLP span exporter Args: endpoint: OpenTelemetry Collector receiver endpoint insecure: Connection type credentials: ChannelCredentials object for server authentication headers: Headers to send when exporting timeout: Backend request timeout in seconds compression: gRPC compression method to use channel_options: gRPC channel options """ def __init__( self, stub: ExportStubT, result: ExportResultT, endpoint: Optional[str] = None, insecure: Optional[bool] = None, credentials: Optional[ChannelCredentials] = None, headers: Optional[ Union[TypingSequence[Tuple[str, str]], Dict[str, str], str] ] = None, timeout: Optional[float] = None, compression: Optional[Compression] = None, channel_options: Optional[Tuple[Tuple[str, str]]] = None, ): super().__init__() self._result = result self._stub = stub self._endpoint = endpoint or environ.get( OTEL_EXPORTER_OTLP_ENDPOINT, "http://localhost:4317" ) parsed_url = urlparse(self._endpoint) if parsed_url.scheme == "https": insecure = False insecure_exporter = environ.get(OTEL_EXPORTER_OTLP_INSECURE) if insecure is None: if insecure_exporter is not None: insecure = insecure_exporter.lower() == "true" else: insecure = parsed_url.scheme == "http" if parsed_url.netloc: self._endpoint = parsed_url.netloc self._headers = headers or environ.get(OTEL_EXPORTER_OTLP_HEADERS) if isinstance(self._headers, str): temp_headers = parse_env_headers(self._headers, liberal=True) self._headers = tuple(temp_headers.items()) elif isinstance(self._headers, dict): self._headers = tuple(self._headers.items()) if self._headers is None: self._headers = tuple() if channel_options: # merge the default channel options with the one passed as parameter overridden_options = { opt_name for (opt_name, _) in channel_options } default_options = tuple( (opt_name, opt_value) for opt_name, opt_value in _OTLP_GRPC_CHANNEL_OPTIONS if opt_name not in overridden_options ) self._channel_options = default_options + channel_options else: self._channel_options = tuple(_OTLP_GRPC_CHANNEL_OPTIONS) self._timeout = timeout or float( environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, 10) ) self._collector_kwargs = None compression = ( environ_to_compression(OTEL_EXPORTER_OTLP_COMPRESSION) if compression is None else compression ) or Compression.NoCompression if insecure: self._channel = insecure_channel( self._endpoint, compression=compression, options=self._channel_options, ) else: self._credentials = _get_credentials( credentials, _OTEL_PYTHON_EXPORTER_OTLP_GRPC_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, ) self._channel = secure_channel( self._endpoint, self._credentials, compression=compression, options=self._channel_options, ) self._client = self._stub(self._channel) # type: ignore [reportCallIssue] self._shutdown_in_progress = threading.Event() self._shutdown = False @abstractmethod def _translate_data( self, data: SDKDataT, ) -> ExportServiceRequestT: pass def _export( self, data: SDKDataT, ) -> ExportResultT: if self._shutdown: logger.warning("Exporter already shutdown, ignoring batch") 
return self._result.FAILURE # type: ignore [reportReturnType] # FIXME remove this check if the export type for traces # gets updated to a class that represents the proto # TracesData and use the code below instead. deadline_sec = time() + self._timeout for retry_num in range(_MAX_RETRYS): try: self._client.Export( request=self._translate_data(data), metadata=self._headers, timeout=deadline_sec - time(), ) return self._result.SUCCESS # type: ignore [reportReturnType] except RpcError as error: retry_info_bin = dict(error.trailing_metadata()).get( # type: ignore [reportAttributeAccessIssue] "google.rpc.retryinfo-bin" # type: ignore [reportArgumentType] ) # multiplying by a random number between .8 and 1.2 introduces a +/20% jitter to each backoff. backoff_seconds = 2**retry_num * random.uniform(0.8, 1.2) if retry_info_bin is not None: retry_info = RetryInfo() retry_info.ParseFromString(retry_info_bin) backoff_seconds = ( retry_info.retry_delay.seconds + retry_info.retry_delay.nanos / 1.0e9 ) if ( error.code() not in _RETRYABLE_ERROR_CODES # type: ignore [reportAttributeAccessIssue] or retry_num + 1 == _MAX_RETRYS or backoff_seconds > (deadline_sec - time()) or self._shutdown ): logger.error( "Failed to export %s to %s, error code: %s", self._exporting, self._endpoint, error.code(), # type: ignore [reportAttributeAccessIssue] exc_info=error.code() == StatusCode.UNKNOWN, # type: ignore [reportAttributeAccessIssue] ) return self._result.FAILURE # type: ignore [reportReturnType] logger.warning( "Transient error %s encountered while exporting %s to %s, retrying in %.2fs.", error.code(), # type: ignore [reportAttributeAccessIssue] self._exporting, self._endpoint, backoff_seconds, ) shutdown = self._shutdown_in_progress.wait(backoff_seconds) if shutdown: logger.warning("Shutdown in progress, aborting retry.") break # Not possible to reach here but the linter is complaining. return self._result.FAILURE # type: ignore [reportReturnType] def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: if self._shutdown: logger.warning("Exporter already shutdown, ignoring call") return self._shutdown = True self._shutdown_in_progress.set() self._channel.close() @property @abstractmethod def _exporting(self) -> str: """ Returns a string that describes the overall exporter, to be used in warning messages. """ pass metric_exporter/000077500000000000000000000000001511654350100432445ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc__init__.py000066400000000000000000000241131511654350100453560ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter# Copyright The OpenTelemetry Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
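# A standalone model of the retry schedule implemented in
# OTLPExporterMixin._export above (a simplified sketch: the real loop also
# honors a server-supplied RetryInfo delay and the overall export deadline):
#
#     import random
#
#     def backoff_seconds(retry_num: int) -> float:
#         # exponential backoff (1s, 2s, 4s, ...) with +/-20% jitter
#         return 2**retry_num * random.uniform(0.8, 1.2)
#
# Each failed attempt waits this long before the next one, so retry 0 waits
# roughly 0.8-1.2s and retry 5 roughly 25.6-38.4s; after _MAX_RETRYS (6)
# attempts the export fails without waiting again.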
from __future__ import annotations from dataclasses import replace from logging import getLogger from os import environ from typing import Iterable, List, Tuple, Union from typing import Sequence as TypingSequence from grpc import ChannelCredentials, Compression from opentelemetry.exporter.otlp.proto.common._internal.metrics_encoder import ( OTLPMetricExporterMixin, ) from opentelemetry.exporter.otlp.proto.common.metrics_encoder import ( encode_metrics, ) from opentelemetry.exporter.otlp.proto.grpc.exporter import ( # noqa: F401 OTLPExporterMixin, _get_credentials, environ_to_compression, get_resource_data, ) from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ( ExportMetricsServiceRequest, ) from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import ( MetricsServiceStub, ) from opentelemetry.proto.common.v1.common_pb2 import ( # noqa: F401 InstrumentationScope, ) from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2 # noqa: F401 from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_GRPC_METRICS_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_HEADERS, OTEL_EXPORTER_OTLP_METRICS_INSECURE, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, ) from opentelemetry.sdk.metrics._internal.aggregation import Aggregation from opentelemetry.sdk.metrics.export import ( # noqa: F401 AggregationTemporality, DataPointT, Gauge, Metric, MetricExporter, MetricExportResult, MetricsData, ResourceMetrics, ScopeMetrics, Sum, ) from opentelemetry.sdk.metrics.export import ( # noqa: F401 ExponentialHistogram as ExponentialHistogramType, ) from opentelemetry.sdk.metrics.export import ( # noqa: F401 Histogram as HistogramType, ) _logger = getLogger(__name__) class OTLPMetricExporter( MetricExporter, OTLPExporterMixin[ MetricsData, ExportMetricsServiceRequest, MetricExportResult, MetricsServiceStub, ], OTLPMetricExporterMixin, ): """OTLP metric exporter Args: endpoint: Target URL to which the exporter is going to send metrics max_export_batch_size: Maximum number of data points to export in a single request. This is to deal with gRPC's 4MB message size limit. If not set there is no limit to the number of data points in a request. If it is set and the number of data points exceeds the max, the request will be split. 
""" def __init__( self, endpoint: str | None = None, insecure: bool | None = None, credentials: ChannelCredentials | None = None, headers: Union[TypingSequence[Tuple[str, str]], dict[str, str], str] | None = None, timeout: float | None = None, compression: Compression | None = None, preferred_temporality: dict[type, AggregationTemporality] | None = None, preferred_aggregation: dict[type, Aggregation] | None = None, max_export_batch_size: int | None = None, channel_options: Tuple[Tuple[str, str]] | None = None, ): insecure_metrics = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE) if insecure is None and insecure_metrics is not None: insecure = insecure_metrics.lower() == "true" if ( not insecure and environ.get(OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE) is not None ): credentials = _get_credentials( credentials, _OTEL_PYTHON_EXPORTER_OTLP_GRPC_METRICS_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE, ) environ_timeout = environ.get(OTEL_EXPORTER_OTLP_METRICS_TIMEOUT) environ_timeout = ( float(environ_timeout) if environ_timeout is not None else None ) compression = ( environ_to_compression(OTEL_EXPORTER_OTLP_METRICS_COMPRESSION) if compression is None else compression ) self._common_configuration( preferred_temporality, preferred_aggregation ) OTLPExporterMixin.__init__( self, stub=MetricsServiceStub, result=MetricExportResult, endpoint=endpoint or environ.get(OTEL_EXPORTER_OTLP_METRICS_ENDPOINT), insecure=insecure, credentials=credentials, headers=headers or environ.get(OTEL_EXPORTER_OTLP_METRICS_HEADERS), timeout=timeout or environ_timeout, compression=compression, channel_options=channel_options, ) self._max_export_batch_size: int | None = max_export_batch_size def _translate_data( # type: ignore [reportIncompatibleMethodOverride] self, data: MetricsData ) -> ExportMetricsServiceRequest: return encode_metrics(data) def export( self, metrics_data: MetricsData, timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: # TODO(#2663): OTLPExporterMixin should pass timeout to gRPC if self._max_export_batch_size is None: return self._export(data=metrics_data) export_result = MetricExportResult.SUCCESS for split_metrics_data in self._split_metrics_data(metrics_data): split_export_result = self._export(data=split_metrics_data) if split_export_result is MetricExportResult.FAILURE: export_result = MetricExportResult.FAILURE return export_result def _split_metrics_data( self, metrics_data: MetricsData, ) -> Iterable[MetricsData]: assert self._max_export_batch_size is not None batch_size: int = 0 split_resource_metrics: List[ResourceMetrics] = [] for resource_metrics in metrics_data.resource_metrics: split_scope_metrics: List[ScopeMetrics] = [] split_resource_metrics.append( replace( resource_metrics, scope_metrics=split_scope_metrics, ) ) for scope_metrics in resource_metrics.scope_metrics: split_metrics: List[Metric] = [] split_scope_metrics.append( replace( scope_metrics, metrics=split_metrics, ) ) for metric in scope_metrics.metrics: split_data_points: List[DataPointT] = [] split_metrics.append( replace( metric, data=replace( metric.data, data_points=split_data_points, ), ) ) for data_point in metric.data.data_points: split_data_points.append(data_point) batch_size += 1 if batch_size >= self._max_export_batch_size: yield MetricsData( resource_metrics=split_resource_metrics ) # Reset all the variables batch_size = 0 split_data_points = [] split_metrics = [ replace( metric, data=replace( 
metric.data, data_points=split_data_points, ), ) ] split_scope_metrics = [ replace( scope_metrics, metrics=split_metrics, ) ] split_resource_metrics = [ replace( resource_metrics, scope_metrics=split_scope_metrics, ) ] if not split_data_points: # If data_points is empty remove the whole metric split_metrics.pop() if not split_metrics: # If metrics is empty remove the whole scope_metrics split_scope_metrics.pop() if not split_scope_metrics: # If scope_metrics is empty remove the whole resource_metrics split_resource_metrics.pop() if batch_size > 0: yield MetricsData(resource_metrics=split_resource_metrics) def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: OTLPExporterMixin.shutdown(self, timeout_millis=timeout_millis) @property def _exporting(self) -> str: return "metrics" def force_flush(self, timeout_millis: float = 10_000) -> bool: """Nothing is buffered in this exporter, so this method does nothing.""" return True py.typed000066400000000000000000000000001511654350100415160ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpctrace_exporter/000077500000000000000000000000001511654350100430575ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc__init__.py000066400000000000000000000124551511654350100451770ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/trace_exporter# Copyright The OpenTelemetry Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
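# A standalone model of the batch-splitting rule used by
# OTLPMetricExporter._split_metrics_data above (a simplified sketch: the real
# method also rebuilds the resource/scope/metric wrappers around each chunk):
#
#     def chunks(points, max_batch):
#         batch = []
#         for point in points:
#             batch.append(point)
#             if len(batch) >= max_batch:
#                 yield batch
#                 batch = []
#         if batch:
#             yield batch
#
#     # list(chunks(range(5), 2)) == [[0, 1], [2, 3], [4]]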
"""OTLP Span Exporter""" import logging from os import environ from typing import Dict, Optional, Sequence, Tuple, Union from typing import Sequence as TypingSequence from grpc import ChannelCredentials, Compression from opentelemetry.exporter.otlp.proto.common.trace_encoder import ( encode_spans, ) from opentelemetry.exporter.otlp.proto.grpc.exporter import ( # noqa: F401 OTLPExporterMixin, _get_credentials, environ_to_compression, get_resource_data, ) from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ( ExportTraceServiceRequest, ) from opentelemetry.proto.collector.trace.v1.trace_service_pb2_grpc import ( TraceServiceStub, ) from opentelemetry.proto.common.v1.common_pb2 import ( # noqa: F401 InstrumentationScope, ) from opentelemetry.proto.trace.v1.trace_pb2 import ( # noqa: F401 ResourceSpans, ScopeSpans, Status, ) from opentelemetry.proto.trace.v1.trace_pb2 import ( # noqa: F401 Span as CollectorSpan, ) from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_GRPC_TRACES_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_HEADERS, OTEL_EXPORTER_OTLP_TRACES_INSECURE, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, ) from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult logger = logging.getLogger(__name__) # pylint: disable=no-member class OTLPSpanExporter( SpanExporter, OTLPExporterMixin[ Sequence[ReadableSpan], ExportTraceServiceRequest, SpanExportResult, TraceServiceStub, ], ): # pylint: disable=unsubscriptable-object """OTLP span exporter Args: endpoint: OpenTelemetry Collector receiver endpoint insecure: Connection type credentials: Credentials object for server authentication headers: Headers to send when exporting timeout: Backend request timeout in seconds compression: gRPC compression method to use """ def __init__( self, endpoint: Optional[str] = None, insecure: Optional[bool] = None, credentials: Optional[ChannelCredentials] = None, headers: Optional[ Union[TypingSequence[Tuple[str, str]], Dict[str, str], str] ] = None, timeout: Optional[float] = None, compression: Optional[Compression] = None, channel_options: Optional[Tuple[Tuple[str, str]]] = None, ): insecure_spans = environ.get(OTEL_EXPORTER_OTLP_TRACES_INSECURE) if insecure is None and insecure_spans is not None: insecure = insecure_spans.lower() == "true" if ( not insecure and environ.get(OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE) is not None ): credentials = _get_credentials( credentials, _OTEL_PYTHON_EXPORTER_OTLP_GRPC_TRACES_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE, ) environ_timeout = environ.get(OTEL_EXPORTER_OTLP_TRACES_TIMEOUT) environ_timeout = ( float(environ_timeout) if environ_timeout is not None else None ) compression = ( environ_to_compression(OTEL_EXPORTER_OTLP_TRACES_COMPRESSION) if compression is None else compression ) OTLPExporterMixin.__init__( self, stub=TraceServiceStub, result=SpanExportResult, endpoint=endpoint or environ.get(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT), insecure=insecure, credentials=credentials, headers=headers or environ.get(OTEL_EXPORTER_OTLP_TRACES_HEADERS), timeout=timeout or environ_timeout, compression=compression, channel_options=channel_options, ) def _translate_data( self, data: 
Sequence[ReadableSpan] ) -> ExportTraceServiceRequest: return encode_spans(data) def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: return self._export(spans) def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: OTLPExporterMixin.shutdown(self, timeout_millis=timeout_millis) def force_flush(self, timeout_millis: int = 30000) -> bool: """Nothing is buffered in this exporter, so this method does nothing.""" return True @property def _exporting(self): return "traces" version/000077500000000000000000000000001511654350100415165ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc__init__.py000066400000000000000000000011401511654350100436230ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "1.39.1" python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in000066400000000000000000000005171511654350100335340ustar00rootroot00000000000000colorama>=0.4.6 iniconfig>=2.0.0 packaging>=24.0 pluggy>=1.5.0 protobuf>=5.29.5 pytest>=7.4.4 -e opentelemetry-api -e tests/opentelemetry-test-utils -e exporter/opentelemetry-exporter-otlp-proto-common -e opentelemetry-proto -e opentelemetry-sdk -e opentelemetry-semantic-conventions -e exporter/opentelemetry-exporter-otlp-proto-grpc test-requirements.latest.txt000066400000000000000000000056711511654350100351670ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc# This file was autogenerated by uv via the following command: # uv pip compile --python 3.9 --universal -c dev-requirements.txt exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in -o exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.latest.txt -e exporter/opentelemetry-exporter-otlp-proto-common # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # opentelemetry-exporter-otlp-proto-grpc -e exporter/opentelemetry-exporter-otlp-proto-grpc # via -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in -e opentelemetry-api # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # opentelemetry-exporter-otlp-proto-grpc # opentelemetry-sdk # opentelemetry-semantic-conventions # opentelemetry-test-utils -e opentelemetry-proto # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # opentelemetry-exporter-otlp-proto-common # opentelemetry-exporter-otlp-proto-grpc -e opentelemetry-sdk # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # opentelemetry-exporter-otlp-proto-grpc # opentelemetry-test-utils -e opentelemetry-semantic-conventions # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # opentelemetry-sdk -e 
tests/opentelemetry-test-utils # via -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in asgiref==3.7.2 # via # -c dev-requirements.txt # opentelemetry-test-utils colorama==0.4.6 # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # pytest exceptiongroup==1.3.0 ; python_full_version < '3.11' # via pytest googleapis-common-protos==1.70.0 # via opentelemetry-exporter-otlp-proto-grpc grpcio==1.73.0 # via opentelemetry-exporter-otlp-proto-grpc importlib-metadata==8.7.0 # via opentelemetry-api iniconfig==2.1.0 # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # pytest packaging==25.0 # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # pytest pluggy==1.6.0 # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # pytest protobuf==6.31.1 # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # googleapis-common-protos # opentelemetry-proto pytest==7.4.4 # via # -c dev-requirements.txt # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in tomli==2.2.1 ; python_full_version < '3.11' # via pytest typing-extensions==4.14.0 # via # asgiref # exceptiongroup # opentelemetry-api # opentelemetry-exporter-otlp-proto-grpc # opentelemetry-sdk # opentelemetry-semantic-conventions zipp==3.23.0 # via importlib-metadata test-requirements.oldest.txt000066400000000000000000000060631511654350100351610ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc# This file was autogenerated by uv via the following command: # uv pip compile --python 3.9 --universal --resolution lowest -c dev-requirements.txt exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in -o exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.oldest.txt -e exporter/opentelemetry-exporter-otlp-proto-common # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # opentelemetry-exporter-otlp-proto-grpc -e exporter/opentelemetry-exporter-otlp-proto-grpc # via -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in -e opentelemetry-api # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # opentelemetry-exporter-otlp-proto-grpc # opentelemetry-sdk # opentelemetry-semantic-conventions # opentelemetry-test-utils -e opentelemetry-proto # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # opentelemetry-exporter-otlp-proto-common # opentelemetry-exporter-otlp-proto-grpc -e opentelemetry-sdk # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # opentelemetry-exporter-otlp-proto-grpc # opentelemetry-test-utils -e opentelemetry-semantic-conventions # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # opentelemetry-sdk -e tests/opentelemetry-test-utils # via -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in asgiref==3.7.2 # via # -c dev-requirements.txt # opentelemetry-test-utils colorama==0.4.6 # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # pytest exceptiongroup==1.0.0 ; python_full_version < '3.11' # via pytest googleapis-common-protos==1.63.1 # via opentelemetry-exporter-otlp-proto-grpc grpcio==1.63.2 ; python_full_version < '3.13' # via opentelemetry-exporter-otlp-proto-grpc grpcio==1.66.2 ; python_full_version >= '3.13' # via opentelemetry-exporter-otlp-proto-grpc importlib-metadata==6.0.0 # via 
opentelemetry-api iniconfig==2.0.0 # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # pytest packaging==24.0 # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # pytest pluggy==1.5.0 # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # pytest protobuf==5.29.5 # via # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in # googleapis-common-protos # opentelemetry-proto pytest==7.4.4 # via # -c dev-requirements.txt # -r exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.in tomli==1.0.0 ; python_full_version < '3.11' # via pytest typing-extensions==4.6.0 # via # asgiref # opentelemetry-api # opentelemetry-exporter-otlp-proto-grpc # opentelemetry-sdk # opentelemetry-semantic-conventions zipp==0.5.0 # via importlib-metadata python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/000077500000000000000000000000001511654350100306435ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/__init__.py000066400000000000000000000000001511654350100327420ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/fixtures/000077500000000000000000000000001511654350100325145ustar00rootroot00000000000000test-client-cert.pem000066400000000000000000000000001511654350100363140ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/fixturestest-client-key.pem000066400000000000000000000000001511654350100361470ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/fixturespython-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/fixtures/test.cert000066400000000000000000000000001511654350100343400ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs/000077500000000000000000000000001511654350100316075ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs/__init__.py000066400000000000000000000000001511654350100337060ustar00rootroot00000000000000test_otlp_logs_exporter.py000066400000000000000000000537741511654350100371130ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# pylint: disable=too-many-lines import time from os.path import dirname from unittest import TestCase from unittest.mock import Mock, patch from google.protobuf.json_format import MessageToDict from grpc import ChannelCredentials, Compression from opentelemetry._logs import LogRecord, SeverityNumber from opentelemetry.exporter.otlp.proto.common._internal import _encode_value from opentelemetry.exporter.otlp.proto.grpc._log_exporter import ( OTLPLogExporter, ) from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import ( ExportLogsServiceRequest, ) from opentelemetry.proto.common.v1.common_pb2 import AnyValue, KeyValue from opentelemetry.proto.common.v1.common_pb2 import ( InstrumentationScope as PB2InstrumentationScope, ) from opentelemetry.proto.logs.v1.logs_pb2 import LogRecord as PB2LogRecord from opentelemetry.proto.logs.v1.logs_pb2 import ResourceLogs, ScopeLogs from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as OTLPResource, ) from opentelemetry.sdk._logs import ReadWriteLogRecord from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_HEADERS, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT, ) from opentelemetry.sdk.resources import Resource as SDKResource from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.trace import ( NonRecordingSpan, SpanContext, TraceFlags, set_span_in_context, ) THIS_DIR = dirname(__file__) class TestOTLPLogExporter(TestCase): def setUp(self): self.exporter = OTLPLogExporter() ctx_log_data_1 = set_span_in_context( NonRecordingSpan( SpanContext( 2604504634922341076776623263868986797, 5213367945872657620, False, TraceFlags(0x01), ) ) ) self.log_data_1 = ReadWriteLogRecord( LogRecord( timestamp=int(time.time() * 1e9), context=ctx_log_data_1, severity_text="WARNING", severity_number=SeverityNumber.WARN, body="Zhengzhou, We have a heaviest rains in 1000 years", attributes={"a": 1, "b": "c"}, ), resource=SDKResource({"key": "value"}), instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), ) ctx_log_data_2 = set_span_in_context( NonRecordingSpan( SpanContext( 2604504634922341076776623263868986799, 5213367945872657623, False, TraceFlags(0x01), ) ) ) self.log_data_2 = ReadWriteLogRecord( LogRecord( timestamp=int(time.time() * 1e9), context=ctx_log_data_2, severity_text="INFO", severity_number=SeverityNumber.INFO2, body="Sydney, Opera House is closed", attributes={"custom_attr": [1, 2, 3]}, ), resource=SDKResource({"key": "value"}), instrumentation_scope=InstrumentationScope( "second_name", "second_version" ), ) ctx_log_data_3 = set_span_in_context( NonRecordingSpan( SpanContext( 2604504634922341076776623263868986800, 5213367945872657628, False, TraceFlags(0x01), ) ) ) self.log_data_3 = ReadWriteLogRecord( LogRecord( timestamp=int(time.time() * 1e9), context=ctx_log_data_3, severity_text="ERROR", severity_number=SeverityNumber.WARN, body="Mumbai, Boil water before drinking", ), resource=SDKResource({"service": "myapp"}), instrumentation_scope=InstrumentationScope( "third_name", "third_version" ), ) ctx_log_data_4 = set_span_in_context( NonRecordingSpan( SpanContext(0, 5213367945872657629, False, TraceFlags(0x01)) ) ) self.log_data_4 = ReadWriteLogRecord( LogRecord( timestamp=int(time.time() * 1e9), context=ctx_log_data_4, severity_text="ERROR", severity_number=SeverityNumber.WARN, 
body="Invalid trace id check", ), resource=SDKResource({"service": "myapp"}), instrumentation_scope=InstrumentationScope( "fourth_name", "fourth_version" ), ) ctx_log_data_5 = set_span_in_context( NonRecordingSpan( SpanContext( 2604504634922341076776623263868986801, 0, False, TraceFlags(0x01), ) ) ) self.log_data_5 = ReadWriteLogRecord( LogRecord( timestamp=int(time.time() * 1e9), context=ctx_log_data_5, severity_text="ERROR", severity_number=SeverityNumber.WARN, body="Invalid span id check", ), resource=SDKResource({"service": "myapp"}), instrumentation_scope=InstrumentationScope( "fifth_name", "fifth_version" ), ) def test_exporting(self): # pylint: disable=protected-access self.assertEqual(self.exporter._exporting, "logs") @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_LOGS_ENDPOINT: "logs:4317", OTEL_EXPORTER_OTLP_LOGS_HEADERS: " key1=value1,KEY2 = VALUE=2", OTEL_EXPORTER_OTLP_LOGS_TIMEOUT: "10", OTEL_EXPORTER_OTLP_LOGS_COMPRESSION: "gzip", }, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin.__init__" ) def test_env_variables(self, mock_exporter_mixin): OTLPLogExporter() self.assertTrue(len(mock_exporter_mixin.call_args_list) == 1) _, kwargs = mock_exporter_mixin.call_args_list[0] self.assertEqual(kwargs["endpoint"], "logs:4317") self.assertEqual(kwargs["headers"], " key1=value1,KEY2 = VALUE=2") self.assertEqual(kwargs["timeout"], 10) self.assertEqual(kwargs["compression"], Compression.Gzip) self.assertIsNone(kwargs["credentials"]) # Create a new test method specifically for client certificates @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_LOGS_ENDPOINT: "logs:4317", OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE: THIS_DIR + "/../fixtures/test.cert", OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE: THIS_DIR + "/../fixtures/test-client-cert.pem", OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY: THIS_DIR + "/../fixtures/test-client-key.pem", OTEL_EXPORTER_OTLP_LOGS_HEADERS: " key1=value1,KEY2 = VALUE=2", OTEL_EXPORTER_OTLP_LOGS_TIMEOUT: "10", OTEL_EXPORTER_OTLP_LOGS_COMPRESSION: "gzip", }, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin.__init__" ) def test_env_variables_with_client_certificates(self, mock_exporter_mixin): OTLPLogExporter() self.assertTrue(len(mock_exporter_mixin.call_args_list) == 1) _, kwargs = mock_exporter_mixin.call_args_list[0] self.assertEqual(kwargs["endpoint"], "logs:4317") self.assertEqual(kwargs["headers"], " key1=value1,KEY2 = VALUE=2") self.assertEqual(kwargs["timeout"], 10) self.assertEqual(kwargs["compression"], Compression.Gzip) self.assertIsNotNone(kwargs["credentials"]) self.assertIsInstance(kwargs["credentials"], ChannelCredentials) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_LOGS_ENDPOINT: "logs:4317", OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE: THIS_DIR + "/../fixtures/test.cert", OTEL_EXPORTER_OTLP_LOGS_HEADERS: " key1=value1,KEY2 = VALUE=2", OTEL_EXPORTER_OTLP_LOGS_TIMEOUT: "10", OTEL_EXPORTER_OTLP_LOGS_COMPRESSION: "gzip", }, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin.__init__" ) @patch("logging.Logger.error") def test_env_variables_with_only_certificate( self, mock_logger_error, mock_exporter_mixin ): OTLPLogExporter() self.assertTrue(len(mock_exporter_mixin.call_args_list) == 1) _, kwargs = mock_exporter_mixin.call_args_list[0] self.assertEqual(kwargs["endpoint"], "logs:4317") self.assertEqual(kwargs["headers"], " key1=value1,KEY2 = VALUE=2") self.assertEqual(kwargs["timeout"], 10) self.assertEqual(kwargs["compression"], Compression.Gzip) self.assertIsNotNone(kwargs["credentials"]) 
self.assertIsInstance(kwargs["credentials"], ChannelCredentials) mock_logger_error.assert_not_called() @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_LOGS_ENDPOINT: "logs:4317", OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE: THIS_DIR + "/../fixtures/test.cert", OTEL_EXPORTER_OTLP_LOGS_HEADERS: " key1=value1,KEY2 = VALUE=2", OTEL_EXPORTER_OTLP_LOGS_TIMEOUT: "10", OTEL_EXPORTER_OTLP_LOGS_COMPRESSION: "gzip", }, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin.__init__" ) @patch("logging.Logger.error") def test_kwargs_have_precedence_over_env_variables( self, mock_logger_error, mock_exporter_mixin ): credentials_mock = Mock() OTLPLogExporter( endpoint="logs:4318", headers=(("an", "header"),), timeout=20, credentials=credentials_mock, compression=Compression.NoCompression, channel_options=(("some", "options"),), ) self.assertTrue(len(mock_exporter_mixin.call_args_list) == 1) _, kwargs = mock_exporter_mixin.call_args_list[0] self.assertEqual(kwargs["endpoint"], "logs:4318") self.assertEqual(kwargs["headers"], (("an", "header"),)) self.assertEqual(kwargs["timeout"], 20) self.assertEqual(kwargs["compression"], Compression.NoCompression) self.assertEqual(kwargs["credentials"], credentials_mock) self.assertEqual(kwargs["channel_options"], (("some", "options"),)) mock_logger_error.assert_not_called() def export_log_and_deserialize(self, log_data): # pylint: disable=protected-access translated_data = self.exporter._translate_data([log_data]) request_dict = MessageToDict(translated_data) log_records = ( request_dict.get("resourceLogs")[0] .get("scopeLogs")[0] .get("logRecords") ) return log_records def test_exported_log_without_trace_id(self): log_records = self.export_log_and_deserialize(self.log_data_4) if log_records: log_record = log_records[0] self.assertIn("spanId", log_record) self.assertNotIn( "traceId", log_record, "traceId should not be present in the log record", ) else: self.fail("No log records found") def test_exported_log_without_span_id(self): log_records = self.export_log_and_deserialize(self.log_data_5) if log_records: log_record = log_records[0] self.assertIn("traceId", log_record) self.assertNotIn( "spanId", log_record, "spanId should not be present in the log record", ) else: self.fail("No log records found") def test_translate_log_data(self): expected = ExportLogsServiceRequest( resource_logs=[ ResourceLogs( resource=OTLPResource( attributes=[ KeyValue( key="key", value=AnyValue(string_value="value") ), ] ), scope_logs=[ ScopeLogs( scope=PB2InstrumentationScope( name="first_name", version="first_version" ), log_records=[ PB2LogRecord( # pylint: disable=no-member time_unix_nano=self.log_data_1.log_record.timestamp, observed_time_unix_nano=self.log_data_1.log_record.observed_timestamp, severity_number=self.log_data_1.log_record.severity_number.value, severity_text="WARNING", span_id=int.to_bytes( 5213367945872657620, 8, "big" ), trace_id=int.to_bytes( 2604504634922341076776623263868986797, 16, "big", ), body=_encode_value( "Zhengzhou, We have a heaviest rains in 1000 years" ), attributes=[ KeyValue( key="a", value=AnyValue(int_value=1), ), KeyValue( key="b", value=AnyValue(string_value="c"), ), ], flags=int( self.log_data_1.log_record.trace_flags ), ) ], ) ], ), ] ) # pylint: disable=protected-access self.assertEqual( expected, self.exporter._translate_data([self.log_data_1]) ) def test_translate_multiple_logs(self): expected = ExportLogsServiceRequest( resource_logs=[ ResourceLogs( resource=OTLPResource( attributes=[ KeyValue( key="key", 
value=AnyValue(string_value="value") ), ] ), scope_logs=[ ScopeLogs( scope=PB2InstrumentationScope( name="first_name", version="first_version" ), log_records=[ PB2LogRecord( # pylint: disable=no-member time_unix_nano=self.log_data_1.log_record.timestamp, observed_time_unix_nano=self.log_data_1.log_record.observed_timestamp, severity_number=self.log_data_1.log_record.severity_number.value, severity_text="WARNING", span_id=int.to_bytes( 5213367945872657620, 8, "big" ), trace_id=int.to_bytes( 2604504634922341076776623263868986797, 16, "big", ), body=_encode_value( "Zhengzhou, We have a heaviest rains in 1000 years" ), attributes=[ KeyValue( key="a", value=AnyValue(int_value=1), ), KeyValue( key="b", value=AnyValue(string_value="c"), ), ], flags=int( self.log_data_1.log_record.trace_flags ), ) ], ), ScopeLogs( scope=PB2InstrumentationScope( name="second_name", version="second_version" ), log_records=[ PB2LogRecord( # pylint: disable=no-member time_unix_nano=self.log_data_2.log_record.timestamp, observed_time_unix_nano=self.log_data_2.log_record.observed_timestamp, severity_number=self.log_data_2.log_record.severity_number.value, severity_text="INFO", span_id=int.to_bytes( 5213367945872657623, 8, "big" ), trace_id=int.to_bytes( 2604504634922341076776623263868986799, 16, "big", ), body=_encode_value( "Sydney, Opera House is closed" ), attributes=[ KeyValue( key="custom_attr", value=_encode_value([1, 2, 3]), ), ], flags=int( self.log_data_2.log_record.trace_flags ), ) ], ), ], ), ResourceLogs( resource=OTLPResource( attributes=[ KeyValue( key="service", value=AnyValue(string_value="myapp"), ), ] ), scope_logs=[ ScopeLogs( scope=PB2InstrumentationScope( name="third_name", version="third_version" ), log_records=[ PB2LogRecord( # pylint: disable=no-member time_unix_nano=self.log_data_3.log_record.timestamp, observed_time_unix_nano=self.log_data_3.log_record.observed_timestamp, severity_number=self.log_data_3.log_record.severity_number.value, severity_text="ERROR", span_id=int.to_bytes( 5213367945872657628, 8, "big" ), trace_id=int.to_bytes( 2604504634922341076776623263868986800, 16, "big", ), body=_encode_value( "Mumbai, Boil water before drinking" ), attributes=[], flags=int( self.log_data_3.log_record.trace_flags ), ) ], ) ], ), ] ) # pylint: disable=protected-access self.assertEqual( expected, self.exporter._translate_data( [self.log_data_1, self.log_data_2, self.log_data_3] ), ) test_otlp_exporter_mixin.py000066400000000000000000000447261511654350100363240ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
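# The expected protobuf messages in the log exporter tests above carry W3C
# trace/span ids as big-endian byte strings; a worked example of that
# conversion, using a span id from the fixtures:
#
#     >>> int.to_bytes(5213367945872657620, 8, "big").hex()
#     '48599a867125ccd4'
#
# Trace ids use the same encoding with 16 bytes instead of 8.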
import threading import time import unittest from concurrent.futures import ThreadPoolExecutor from logging import WARNING, getLogger from platform import system from typing import Any, Optional, Sequence from unittest import TestCase from unittest.mock import Mock, patch from google.protobuf.duration_pb2 import ( # pylint: disable=no-name-in-module Duration, ) from google.rpc.error_details_pb2 import ( # pylint: disable=no-name-in-module RetryInfo, ) from grpc import ChannelCredentials, Compression, StatusCode, server from opentelemetry.exporter.otlp.proto.common.trace_encoder import ( encode_spans, ) from opentelemetry.exporter.otlp.proto.grpc.exporter import ( # noqa: F401 InvalidCompressionValueException, OTLPExporterMixin, environ_to_compression, ) from opentelemetry.exporter.otlp.proto.grpc.version import __version__ from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ( ExportTraceServiceRequest, ExportTraceServiceResponse, ) from opentelemetry.proto.collector.trace.v1.trace_service_pb2_grpc import ( TraceServiceServicer, TraceServiceStub, add_TraceServiceServicer_to_server, ) from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_GRPC_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_COMPRESSION, ) from opentelemetry.sdk.trace import ReadableSpan, _Span from opentelemetry.sdk.trace.export import ( SpanExporter, SpanExportResult, ) from opentelemetry.test.mock_test_classes import IterEntryPoint logger = getLogger(__name__) # The below tests use this test SpanExporter and Spans, but are testing the # underlying behavior in the mixin. A MetricExporter or LogRecordExporter could # just as easily be used. class OTLPSpanExporterForTesting( SpanExporter, OTLPExporterMixin[ ReadableSpan, ExportTraceServiceRequest, SpanExportResult, TraceServiceStub, ], ): def __init__(self, **kwargs): super().__init__(TraceServiceStub, SpanExportResult, **kwargs) def _translate_data( self, data: Sequence[ReadableSpan] ) -> ExportTraceServiceRequest: return encode_spans(data) def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: return self._export(spans) @property def _exporting(self): return "traces" def shutdown(self, timeout_millis=30_000): return OTLPExporterMixin.shutdown(self, timeout_millis) class TraceServiceServicerWithExportParams(TraceServiceServicer): def __init__( self, export_result: StatusCode, optional_retry_nanos: Optional[int] = None, optional_export_sleep: Optional[float] = None, ): self.export_result = export_result self.optional_export_sleep = optional_export_sleep self.optional_retry_nanos = optional_retry_nanos self.num_requests = 0 # pylint: disable=invalid-name,unused-argument def Export(self, request, context): self.num_requests += 1 if self.optional_export_sleep: time.sleep(self.optional_export_sleep) if self.export_result != StatusCode.OK and self.optional_retry_nanos: context.set_trailing_metadata( ( ( "google.rpc.retryinfo-bin", RetryInfo( retry_delay=Duration( nanos=self.optional_retry_nanos ) ).SerializeToString(), ), ) ) context.set_code(self.export_result) return ExportTraceServiceResponse() class ThreadWithReturnValue(threading.Thread): def __init__( self, target=None, args=(), ): super().__init__(target=target, args=args) self._return = None def run(self): try: if self._target is not None: # type: ignore self._return = self._target(*self._args, **self._kwargs) # type: ignore finally: # Avoid a refcycle if the thread is running a function with # an argument that has a member that points to the thread. 
del self._target, self._args, self._kwargs # type: ignore def join(self, timeout: Optional[float] = None) -> Any: super().join(timeout=timeout) return self._return class TestOTLPExporterMixin(TestCase): def setUp(self): self.server = server(ThreadPoolExecutor(max_workers=10)) self.server.add_insecure_port("127.0.0.1:4317") self.server.start() self.exporter = OTLPSpanExporterForTesting(insecure=True) self.span = _Span( "a", context=Mock( **{ "trace_state": {"a": "b", "c": "d"}, "span_id": 10217189687419569865, "trace_id": 67545097771067222548457157018666467027, } ), ) def tearDown(self): self.server.stop(None) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.insecure_channel") @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.secure_channel") def test_otlp_exporter_endpoint(self, mock_secure, mock_insecure): expected_endpoint = "localhost:4317" endpoints = [ ( "http://localhost:4317", None, mock_insecure, ), ( "localhost:4317", None, mock_secure, ), ( "http://localhost:4317", True, mock_insecure, ), ( "localhost:4317", True, mock_insecure, ), ( "http://localhost:4317", False, mock_secure, ), ( "localhost:4317", False, mock_secure, ), ( "https://localhost:4317", False, mock_secure, ), ( "https://localhost:4317", None, mock_secure, ), ( "https://localhost:4317", True, mock_secure, ), ] for endpoint, insecure, mock_method in endpoints: OTLPSpanExporterForTesting(endpoint=endpoint, insecure=insecure) self.assertEqual( 1, mock_method.call_count, f"expected {mock_method} to be called for {endpoint} {insecure}", ) self.assertEqual( expected_endpoint, mock_method.call_args[0][0], f"expected {expected_endpoint} got {mock_method.call_args[0][0]} {endpoint}", ) mock_method.reset_mock() def test_environ_to_compression(self): with patch.dict( "os.environ", { "test_gzip": "gzip", "test_gzip_caseinsensitive_with_whitespace": " GzIp ", "test_invalid": "some invalid compression", }, ): self.assertEqual( environ_to_compression("test_gzip"), Compression.Gzip ) self.assertEqual( environ_to_compression( "test_gzip_caseinsensitive_with_whitespace" ), Compression.Gzip, ) self.assertIsNone( environ_to_compression("missing_key"), ) with self.assertRaises(InvalidCompressionValueException): environ_to_compression("test_invalid") # pylint: disable=no-self-use @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.insecure_channel") @patch.dict("os.environ", {}) def test_otlp_exporter_otlp_compression_unspecified( self, mock_insecure_channel ): """No env or kwarg should be NoCompression""" OTLPSpanExporterForTesting(insecure=True) mock_insecure_channel.assert_called_once_with( "localhost:4317", compression=Compression.NoCompression, options=( ( "grpc.primary_user_agent", "OTel-OTLP-Exporter-Python/" + __version__, ), ), ) @patch.dict( "os.environ", { _OTEL_PYTHON_EXPORTER_OTLP_GRPC_CREDENTIAL_PROVIDER: "credential_provider" }, ) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.entry_points") def test_that_credential_gets_passed_to_exporter(self, mock_entry_points): credential = ChannelCredentials(None) def f(): return credential mock_entry_points.configure_mock( return_value=[IterEntryPoint("custom_credential", f)] ) exporter = OTLPSpanExporterForTesting(insecure=False) # pylint: disable=protected-access assert exporter._credentials is credential @patch.dict( "os.environ", { _OTEL_PYTHON_EXPORTER_OTLP_GRPC_CREDENTIAL_PROVIDER: "credential_provider" }, ) def test_that_missing_entry_point_raises_exception(self): with self.assertRaises(RuntimeError): OTLPSpanExporterForTesting(insecure=False) 
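# Illustrative sketch (not part of the test suite): the shape of a
# credential-provider callable that the mocked entry point above stands in
# for. The exporter invokes the entry point's value and requires a
# grpc.ChannelCredentials result; anything else raises RuntimeError, as the
# surrounding tests assert. The function name is hypothetical.
def _example_credential_provider():
    from grpc import ssl_channel_credentials

    # A real provider might load certificates from disk or a secret store;
    # here we simply return default TLS channel credentials.
    return ssl_channel_credentials()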
@patch.dict( "os.environ", { _OTEL_PYTHON_EXPORTER_OTLP_GRPC_CREDENTIAL_PROVIDER: "credential_provider" }, ) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.entry_points") def test_that_entry_point_returning_bad_type_raises_exception( self, mock_entry_points ): def f(): return 1 mock_entry_points.configure_mock( return_value=[IterEntryPoint("custom_credential", f)] ) with self.assertRaises(RuntimeError): OTLPSpanExporterForTesting(insecure=False) # pylint: disable=no-self-use, disable=unused-argument @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.ssl_channel_credentials" ) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.secure_channel") @patch.dict("os.environ", {}) def test_no_credentials_ssl_channel_called( self, secure_channel, mock_ssl_channel ): OTLPSpanExporterForTesting(insecure=False) self.assertTrue(mock_ssl_channel.called) # pylint: disable=no-self-use @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.insecure_channel") @patch.dict("os.environ", {OTEL_EXPORTER_OTLP_COMPRESSION: "gzip"}) def test_otlp_exporter_otlp_compression_envvar( self, mock_insecure_channel ): """Just OTEL_EXPORTER_OTLP_COMPRESSION should work""" OTLPSpanExporterForTesting(insecure=True) mock_insecure_channel.assert_called_once_with( "localhost:4317", compression=Compression.Gzip, options=( ( "grpc.primary_user_agent", "OTel-OTLP-Exporter-Python/" + __version__, ), ), ) def test_shutdown(self): add_TraceServiceServicer_to_server( TraceServiceServicerWithExportParams(StatusCode.OK), self.server, ) self.assertEqual( self.exporter.export([self.span]), SpanExportResult.SUCCESS ) self.exporter.shutdown() with self.assertLogs(level=WARNING) as warning: self.assertEqual( self.exporter.export([self.span]), SpanExportResult.FAILURE ) self.assertEqual( warning.records[0].message, "Exporter already shutdown, ignoring batch", ) @unittest.skipIf( system() == "Windows", "For gRPC + windows there's some added delay in the RPCs which breaks the assertion over amount of time passed.", ) def test_shutdown_interrupts_export_retry_backoff(self): add_TraceServiceServicer_to_server( TraceServiceServicerWithExportParams( StatusCode.UNAVAILABLE, ), self.server, ) export_thread = ThreadWithReturnValue( target=self.exporter.export, args=([self.span],) ) with self.assertLogs(level=WARNING) as warning: begin_wait = time.time() export_thread.start() # Wait a bit for export to fail and the backoff sleep to start time.sleep(0.05) # The code should now be in a 1 second backoff. # pylint: disable=protected-access self.assertFalse(self.exporter._shutdown_in_progress.is_set()) self.exporter.shutdown() self.assertTrue(self.exporter._shutdown_in_progress.is_set()) export_result = export_thread.join() end_wait = time.time() self.assertEqual(export_result, SpanExportResult.FAILURE) # Shutdown should have interrupted the sleep. self.assertTrue(end_wait - begin_wait < 0.2) self.assertEqual( warning.records[1].message, "Shutdown in progress, aborting retry.", ) def test_export_over_closed_grpc_channel(self): # pylint: disable=protected-access add_TraceServiceServicer_to_server( TraceServiceServicerWithExportParams(StatusCode.OK), self.server, ) self.exporter.export([self.span]) self.exporter.shutdown() data = self.exporter._translate_data([self.span]) with self.assertRaises(ValueError) as err: self.exporter._client.Export(request=data) self.assertEqual( str(err.exception), "Cannot invoke RPC on closed channel!" 
) @unittest.skipIf( system() == "Windows", "For gRPC + windows there's some added delay in the RPCs which breaks the assertion over amount of time passed.", ) def test_retry_info_is_respected(self): mock_trace_service = TraceServiceServicerWithExportParams( StatusCode.UNAVAILABLE, optional_retry_nanos=200000000, # .2 seconds ) add_TraceServiceServicer_to_server( mock_trace_service, self.server, ) exporter = OTLPSpanExporterForTesting(insecure=True, timeout=10) before = time.time() self.assertEqual( exporter.export([self.span]), SpanExportResult.FAILURE, ) after = time.time() self.assertEqual(mock_trace_service.num_requests, 6) # 1 second plus wiggle room so the test passes consistently. self.assertAlmostEqual(after - before, 1, 1) @unittest.skipIf( system() == "Windows", "For gRPC + windows there's some added delay in the RPCs which breaks the assertion over amount of time passed.", ) def test_retry_not_made_if_would_exceed_timeout(self): mock_trace_service = TraceServiceServicerWithExportParams( StatusCode.UNAVAILABLE ) add_TraceServiceServicer_to_server( mock_trace_service, self.server, ) exporter = OTLPSpanExporterForTesting(insecure=True, timeout=4) before = time.time() self.assertEqual( exporter.export([self.span]), SpanExportResult.FAILURE, ) after = time.time() # Our retry starts with a 1 second backoff then doubles. # First call at time 0, second at time 1, third at time 3, fourth would exceed timeout. self.assertEqual(mock_trace_service.num_requests, 3) # There's a +/-20% jitter on each backoff. self.assertTrue(2.35 < after - before < 3.65) @unittest.skipIf( system() == "Windows", "For gRPC + windows there's some added delay in the RPCs which breaks the assertion over amount of time passed.", ) def test_timeout_set_correctly(self): mock_trace_service = TraceServiceServicerWithExportParams( StatusCode.UNAVAILABLE, optional_export_sleep=0.25 ) add_TraceServiceServicer_to_server( mock_trace_service, self.server, ) exporter = OTLPSpanExporterForTesting(insecure=True, timeout=1.4) # Should timeout after 1.4 seconds. First attempt takes .25 seconds # Then a 1 second sleep, then deadline exceeded after .15 seconds, # mid way through second call. with self.assertLogs(level=WARNING) as warning: before = time.time() # Eliminate the jitter. with patch("random.uniform", return_value=1): self.assertEqual( exporter.export([self.span]), SpanExportResult.FAILURE, ) after = time.time() self.assertEqual( "Failed to export traces to localhost:4317, error code: StatusCode.DEADLINE_EXCEEDED", warning.records[-1].message, ) self.assertEqual(mock_trace_service.num_requests, 2) self.assertAlmostEqual(after - before, 1.4, 1) def test_otlp_headers_from_env(self): # pylint: disable=protected-access # This ensures that there is no other header than standard user-agent. 
self.assertEqual( self.exporter._headers, (), ) def test_permanent_failure(self): with self.assertLogs(level=WARNING) as warning: add_TraceServiceServicer_to_server( TraceServiceServicerWithExportParams( StatusCode.ALREADY_EXISTS ), self.server, ) self.assertEqual( self.exporter.export([self.span]), SpanExportResult.FAILURE ) self.assertEqual( warning.records[-1].message, "Failed to export traces to localhost:4317, error code: StatusCode.ALREADY_EXISTS", ) test_otlp_metrics_exporter.py000066400000000000000000000714331511654350100366410ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=too-many-lines from logging import WARNING from os import environ from os.path import dirname from typing import List from unittest import TestCase from unittest.mock import patch from grpc import ChannelCredentials, Compression from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import ( OTLPMetricExporter, ) from opentelemetry.exporter.otlp.proto.grpc.version import __version__ from opentelemetry.proto.common.v1.common_pb2 import InstrumentationScope from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_HEADERS, OTEL_EXPORTER_OTLP_METRICS_INSECURE, OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, ) from opentelemetry.sdk.metrics import ( Counter, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, ) from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Gauge, Metric, MetricsData, NumberDataPoint, ResourceMetrics, ScopeMetrics, ) from opentelemetry.sdk.metrics.view import ( ExplicitBucketHistogramAggregation, ExponentialBucketHistogramAggregation, ) from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.util.instrumentation import ( InstrumentationScope as SDKInstrumentationScope, ) from opentelemetry.test.metrictestutil import _generate_sum THIS_DIR = dirname(__file__) class TestOTLPMetricExporter(TestCase): # pylint: disable=too-many-public-methods def setUp(self): self.exporter = OTLPMetricExporter() self.metrics = { "sum_int": MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes={"a": 1, "b": False}, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="insrumentation_scope_schema_url", ), metrics=[_generate_sum("sum_int", 33)], schema_url="instrumentation_scope_schema_url", ) ], schema_url="resource_schema_url", ) ] ) } def test_exporting(self): # pylint: disable=protected-access 
self.assertEqual(self.exporter._exporting, "metrics") @patch.dict( "os.environ", {OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: "DELTA"}, ) def test_preferred_temporality(self): # pylint: disable=protected-access exporter = OTLPMetricExporter( preferred_temporality={Counter: AggregationTemporality.CUMULATIVE} ) self.assertEqual( exporter._preferred_temporality[Counter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( exporter._preferred_temporality[UpDownCounter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( exporter._preferred_temporality[Histogram], AggregationTemporality.DELTA, ) self.assertEqual( exporter._preferred_temporality[ObservableCounter], AggregationTemporality.DELTA, ) self.assertEqual( exporter._preferred_temporality[ObservableUpDownCounter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( exporter._preferred_temporality[ObservableGauge], AggregationTemporality.CUMULATIVE, ) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: "collector:4317", OTEL_EXPORTER_OTLP_METRICS_HEADERS: " key1=value1,KEY2 = value=2", OTEL_EXPORTER_OTLP_METRICS_TIMEOUT: "10", OTEL_EXPORTER_OTLP_METRICS_COMPRESSION: "gzip", }, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin.__init__" ) def test_env_variables(self, mock_exporter_mixin): OTLPMetricExporter() self.assertTrue(len(mock_exporter_mixin.call_args_list) == 1) _, kwargs = mock_exporter_mixin.call_args_list[0] self.assertEqual(kwargs["endpoint"], "collector:4317") self.assertEqual(kwargs["headers"], " key1=value1,KEY2 = value=2") self.assertEqual(kwargs["timeout"], 10) self.assertEqual(kwargs["compression"], Compression.Gzip) self.assertIsNone(kwargs["credentials"]) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: "collector:4317", OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE: THIS_DIR + "/fixtures/test.cert", OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE: THIS_DIR + "/fixtures/test-client-cert.pem", OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY: THIS_DIR + "/fixtures/test-client-key.pem", OTEL_EXPORTER_OTLP_METRICS_HEADERS: " key1=value1,KEY2 = value=2", OTEL_EXPORTER_OTLP_METRICS_TIMEOUT: "10", OTEL_EXPORTER_OTLP_METRICS_COMPRESSION: "gzip", }, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin.__init__" ) def test_env_variables_with_client_certificates(self, mock_exporter_mixin): OTLPMetricExporter() self.assertTrue(len(mock_exporter_mixin.call_args_list) == 1) _, kwargs = mock_exporter_mixin.call_args_list[0] self.assertEqual(kwargs["endpoint"], "collector:4317") self.assertEqual(kwargs["headers"], " key1=value1,KEY2 = value=2") self.assertEqual(kwargs["timeout"], 10) self.assertEqual(kwargs["compression"], Compression.Gzip) self.assertIsNotNone(kwargs["credentials"]) self.assertIsInstance(kwargs["credentials"], ChannelCredentials) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: "collector:4317", OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE: THIS_DIR + "/fixtures/test.cert", OTEL_EXPORTER_OTLP_METRICS_HEADERS: " key1=value1,KEY2 = value=2", OTEL_EXPORTER_OTLP_METRICS_TIMEOUT: "10", OTEL_EXPORTER_OTLP_METRICS_COMPRESSION: "gzip", }, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin.__init__" ) @patch("logging.Logger.error") def test_env_variables_with_only_certificate( self, mock_logger_error, mock_exporter_mixin ): OTLPMetricExporter() self.assertTrue(len(mock_exporter_mixin.call_args_list) == 1) _, kwargs = mock_exporter_mixin.call_args_list[0] self.assertEqual(kwargs["endpoint"], "collector:4317") 
self.assertEqual(kwargs["headers"], " key1=value1,KEY2 = value=2") self.assertEqual(kwargs["timeout"], 10) self.assertEqual(kwargs["compression"], Compression.Gzip) self.assertIsNotNone(kwargs["credentials"]) self.assertIsInstance(kwargs["credentials"], ChannelCredentials) mock_logger_error.assert_not_called() @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.ssl_channel_credentials" ) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.secure_channel") # pylint: disable=unused-argument def test_no_credentials_error(self, mock_ssl_channel, mock_secure): OTLPMetricExporter(insecure=False) self.assertTrue(mock_ssl_channel.called) @patch.dict( "os.environ", {OTEL_EXPORTER_OTLP_METRICS_HEADERS: " key1=value1,KEY2 = VALUE=2 "}, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.ssl_channel_credentials" ) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.secure_channel") # pylint: disable=unused-argument def test_otlp_headers_from_env(self, mock_ssl_channel, mock_secure): exporter = OTLPMetricExporter() # pylint: disable=protected-access self.assertEqual( exporter._headers, ( ("key1", "value1"), ("key2", "VALUE=2"), ), ) exporter = OTLPMetricExporter( headers=(("key3", "value3"), ("key4", "value4")) ) # pylint: disable=protected-access self.assertEqual( exporter._headers, ( ("key3", "value3"), ("key4", "value4"), ), ) @patch.dict( "os.environ", {OTEL_EXPORTER_OTLP_METRICS_INSECURE: "True"}, ) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.insecure_channel") # pylint: disable=unused-argument def test_otlp_insecure_from_env(self, mock_insecure): OTLPMetricExporter() # pylint: disable=protected-access self.assertTrue(mock_insecure.called) self.assertEqual( 1, mock_insecure.call_count, f"expected {mock_insecure} to be called", ) # pylint: disable=no-self-use @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.insecure_channel") @patch.dict("os.environ", {OTEL_EXPORTER_OTLP_COMPRESSION: "gzip"}) def test_otlp_exporter_otlp_compression_kwarg(self, mock_insecure_channel): """Specifying kwarg should take precedence over env""" OTLPMetricExporter( insecure=True, compression=Compression.NoCompression ) mock_insecure_channel.assert_called_once_with( "localhost:4317", compression=Compression.NoCompression, options=( ( "grpc.primary_user_agent", "OTel-OTLP-Exporter-Python/" + __version__, ), ), ) # pylint: disable=no-self-use @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.insecure_channel") def test_otlp_exporter_otlp_channel_options_kwarg( self, mock_insecure_channel ): OTLPMetricExporter( insecure=True, channel_options=(("some", "options"),) ) mock_insecure_channel.assert_called_once_with( "localhost:4317", compression=Compression.NoCompression, options=( ( "grpc.primary_user_agent", "OTel-OTLP-Exporter-Python/" + __version__, ), ("some", "options"), ), ) def test_split_metrics_data_many_data_points(self): # GIVEN metrics_data = MetricsData( resource_metrics=[ _resource_metrics( index=1, scope_metrics=[ _scope_metrics( index=1, metrics=[ _gauge( index=1, data_points=[ _number_data_point(11), _number_data_point(12), _number_data_point(13), ], ), ], ), ], ), ] ) # WHEN split_metrics_data: List[MetricsData] = list( # pylint: disable=protected-access OTLPMetricExporter(max_export_batch_size=2)._split_metrics_data( metrics_data=metrics_data, ) ) # THEN self.assertEqual( [ MetricsData( resource_metrics=[ _resource_metrics( index=1, scope_metrics=[ _scope_metrics( index=1, metrics=[ _gauge( index=1, data_points=[ _number_data_point(11), _number_data_point(12), ], ), 
], ), ], ), ] ), MetricsData( resource_metrics=[ _resource_metrics( index=1, scope_metrics=[ _scope_metrics( index=1, metrics=[ _gauge( index=1, data_points=[ _number_data_point(13), ], ), ], ), ], ), ] ), ], split_metrics_data, ) def test_split_metrics_data_nb_data_points_equal_batch_size(self): # GIVEN metrics_data = MetricsData( resource_metrics=[ _resource_metrics( index=1, scope_metrics=[ _scope_metrics( index=1, metrics=[ _gauge( index=1, data_points=[ _number_data_point(11), _number_data_point(12), _number_data_point(13), ], ), ], ), ], ), ] ) # WHEN split_metrics_data: List[MetricsData] = list( # pylint: disable=protected-access OTLPMetricExporter(max_export_batch_size=3)._split_metrics_data( metrics_data=metrics_data, ) ) # THEN self.assertEqual( [ MetricsData( resource_metrics=[ _resource_metrics( index=1, scope_metrics=[ _scope_metrics( index=1, metrics=[ _gauge( index=1, data_points=[ _number_data_point(11), _number_data_point(12), _number_data_point(13), ], ), ], ), ], ), ] ), ], split_metrics_data, ) def test_split_metrics_data_many_resources_scopes_metrics(self): # GIVEN metrics_data = MetricsData( resource_metrics=[ _resource_metrics( index=1, scope_metrics=[ _scope_metrics( index=1, metrics=[ _gauge( index=1, data_points=[ _number_data_point(11), ], ), _gauge( index=2, data_points=[ _number_data_point(12), ], ), ], ), _scope_metrics( index=2, metrics=[ _gauge( index=3, data_points=[ _number_data_point(13), ], ), ], ), ], ), _resource_metrics( index=2, scope_metrics=[ _scope_metrics( index=3, metrics=[ _gauge( index=4, data_points=[ _number_data_point(14), ], ), ], ), ], ), ] ) # WHEN split_metrics_data: List[MetricsData] = list( # pylint: disable=protected-access OTLPMetricExporter(max_export_batch_size=2)._split_metrics_data( metrics_data=metrics_data, ) ) # THEN self.assertEqual( [ MetricsData( resource_metrics=[ _resource_metrics( index=1, scope_metrics=[ _scope_metrics( index=1, metrics=[ _gauge( index=1, data_points=[ _number_data_point(11), ], ), _gauge( index=2, data_points=[ _number_data_point(12), ], ), ], ), ], ), ] ), MetricsData( resource_metrics=[ _resource_metrics( index=1, scope_metrics=[ _scope_metrics( index=2, metrics=[ _gauge( index=3, data_points=[ _number_data_point(13), ], ), ], ), ], ), _resource_metrics( index=2, scope_metrics=[ _scope_metrics( index=3, metrics=[ _gauge( index=4, data_points=[ _number_data_point(14), ], ), ], ), ], ), ] ), ], split_metrics_data, ) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.secure_channel") def test_insecure_https_endpoint(self, mock_secure_channel): OTLPMetricExporter(endpoint="https://ab.c:123", insecure=True) mock_secure_channel.assert_called() def test_aggregation_temporality(self): # pylint: disable=protected-access otlp_metric_exporter = OTLPMetricExporter() for ( temporality ) in otlp_metric_exporter._preferred_temporality.values(): self.assertEqual(temporality, AggregationTemporality.CUMULATIVE) with patch.dict( environ, {OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: "CUMULATIVE"}, ): otlp_metric_exporter = OTLPMetricExporter() for ( temporality ) in otlp_metric_exporter._preferred_temporality.values(): self.assertEqual( temporality, AggregationTemporality.CUMULATIVE ) with patch.dict( environ, {OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: "ABC"} ): with self.assertLogs(level=WARNING): otlp_metric_exporter = OTLPMetricExporter() for ( temporality ) in otlp_metric_exporter._preferred_temporality.values(): self.assertEqual( temporality, AggregationTemporality.CUMULATIVE ) with 
patch.dict( environ, {OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: "DELTA"}, ): otlp_metric_exporter = OTLPMetricExporter() self.assertEqual( otlp_metric_exporter._preferred_temporality[Counter], AggregationTemporality.DELTA, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[UpDownCounter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[Histogram], AggregationTemporality.DELTA, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ObservableCounter], AggregationTemporality.DELTA, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ ObservableUpDownCounter ], AggregationTemporality.CUMULATIVE, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ObservableGauge], AggregationTemporality.CUMULATIVE, ) with patch.dict( environ, {OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: "LOWMEMORY"}, ): otlp_metric_exporter = OTLPMetricExporter() self.assertEqual( otlp_metric_exporter._preferred_temporality[Counter], AggregationTemporality.DELTA, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[UpDownCounter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[Histogram], AggregationTemporality.DELTA, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ObservableCounter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ ObservableUpDownCounter ], AggregationTemporality.CUMULATIVE, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ObservableGauge], AggregationTemporality.CUMULATIVE, ) def test_exponential_explicit_bucket_histogram(self): self.assertIsInstance( # pylint: disable=protected-access OTLPMetricExporter()._preferred_aggregation[Histogram], ExplicitBucketHistogramAggregation, ) with patch.dict( environ, { OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION: "base2_exponential_bucket_histogram" }, ): self.assertIsInstance( # pylint: disable=protected-access OTLPMetricExporter()._preferred_aggregation[Histogram], ExponentialBucketHistogramAggregation, ) with patch.dict( environ, {OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION: "abc"}, ): with self.assertLogs(level=WARNING) as log: self.assertIsInstance( # pylint: disable=protected-access OTLPMetricExporter()._preferred_aggregation[Histogram], ExplicitBucketHistogramAggregation, ) self.assertIn( ( "Invalid value for OTEL_EXPORTER_OTLP_METRICS_DEFAULT_" "HISTOGRAM_AGGREGATION: abc, using explicit bucket " "histogram aggregation" ), log.output[0], ) with patch.dict( environ, { OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION: "explicit_bucket_histogram" }, ): self.assertIsInstance( # pylint: disable=protected-access OTLPMetricExporter()._preferred_aggregation[Histogram], ExplicitBucketHistogramAggregation, ) def test_preferred_aggregation_override(self): histogram_aggregation = ExplicitBucketHistogramAggregation( boundaries=[0.05, 0.1, 0.5, 1, 5, 10], ) exporter = OTLPMetricExporter( preferred_aggregation={ Histogram: histogram_aggregation, }, ) self.assertEqual( # pylint: disable=protected-access exporter._preferred_aggregation[Histogram], histogram_aggregation, ) def _resource_metrics( index: int, scope_metrics: List[ScopeMetrics] ) -> ResourceMetrics: return ResourceMetrics( resource=Resource( attributes={"a": index}, schema_url=f"resource_url_{index}", ), schema_url=f"resource_url_{index}", scope_metrics=scope_metrics, ) def _scope_metrics(index: int, metrics: List[Metric]) -> 
ScopeMetrics: return ScopeMetrics( scope=InstrumentationScope(name=f"scope_{index}"), schema_url=f"scope_url_{index}", metrics=metrics, ) def _gauge(index: int, data_points: List[NumberDataPoint]) -> Metric: return Metric( name=f"gauge_{index}", description="description", unit="unit", data=Gauge(data_points=data_points), ) def _number_data_point(value: int) -> NumberDataPoint: return NumberDataPoint( attributes={"a": 1, "b": True}, start_time_unix_nano=1641946015139533244, time_unix_nano=1641946016139533244, value=value, ) test_otlp_trace_exporter.py000066400000000000000000000777621511654350100363040ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-grpc/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=too-many-lines import os from unittest import TestCase from unittest.mock import Mock, PropertyMock, patch from grpc import ChannelCredentials, Compression from opentelemetry.attributes import BoundedAttributes from opentelemetry.exporter.otlp.proto.common._internal import ( _encode_key_value, ) from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.exporter.otlp.proto.grpc.version import __version__ from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ( ExportTraceServiceRequest, ) from opentelemetry.proto.common.v1.common_pb2 import ( AnyValue, ArrayValue, KeyValue, ) from opentelemetry.proto.common.v1.common_pb2 import ( InstrumentationScope as PB2InstrumentationScope, ) from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as OTLPResource, ) from opentelemetry.proto.trace.v1.trace_pb2 import ( ResourceSpans, ScopeSpans, Status, ) from opentelemetry.proto.trace.v1.trace_pb2 import Span as OTLPSpan from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_HEADERS, OTEL_EXPORTER_OTLP_TRACES_INSECURE, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, ) from opentelemetry.sdk.resources import Resource as SDKResource from opentelemetry.sdk.trace import Status as SDKStatus from opentelemetry.sdk.trace import StatusCode as SDKStatusCode from opentelemetry.sdk.trace import TracerProvider, _Span from opentelemetry.sdk.trace.export import ( SimpleSpanProcessor, ) from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.test.spantestutil import ( get_span_with_dropped_attributes_events_links, ) THIS_DIR = os.path.dirname(__file__) class TestOTLPSpanExporter(TestCase): # pylint: disable=too-many-public-methods def setUp(self): tracer_provider = TracerProvider() self.exporter = OTLPSpanExporter(insecure=True) tracer_provider.add_span_processor(SimpleSpanProcessor(self.exporter)) self.tracer = tracer_provider.get_tracer(__name__) event_mock = Mock( **{ "timestamp": 
1591240820506462784, "attributes": BoundedAttributes( attributes={"a": 1, "b": False} ), } ) type(event_mock).name = PropertyMock(return_value="a") type(event_mock).dropped_attributes = PropertyMock(return_value=0) self.span = _Span( "a", context=Mock( **{ "trace_state": {"a": "b", "c": "d"}, "span_id": 10217189687419569865, "trace_id": 67545097771067222548457157018666467027, } ), resource=SDKResource({"a": 1, "b": False}), parent=Mock(**{"span_id": 12345}), attributes=BoundedAttributes(attributes={"a": 1, "b": True}), events=[event_mock], links=[ Mock( **{ "context.trace_id": 1, "context.span_id": 2, "attributes": BoundedAttributes( attributes={"a": 1, "b": False} ), "dropped_attributes": 0, "kind": OTLPSpan.SpanKind.SPAN_KIND_INTERNAL, # pylint: disable=no-member } ) ], instrumentation_scope=InstrumentationScope( name="name", version="version" ), ) self.span2 = _Span( "b", context=Mock( **{ "trace_state": {"a": "b", "c": "d"}, "span_id": 10217189687419569865, "trace_id": 67545097771067222548457157018666467027, } ), resource=SDKResource({"a": 2, "b": False}), parent=Mock(**{"span_id": 12345}), instrumentation_scope=InstrumentationScope( name="name", version="version" ), ) self.span3 = _Span( "c", context=Mock( **{ "trace_state": {"a": "b", "c": "d"}, "span_id": 10217189687419569865, "trace_id": 67545097771067222548457157018666467027, } ), resource=SDKResource({"a": 1, "b": False}), parent=Mock(**{"span_id": 12345}), instrumentation_scope=InstrumentationScope( name="name2", version="version2" ), ) self.span.start() self.span.end() self.span2.start() self.span2.end() self.span3.start() self.span3.end() def test_exporting(self): # pylint: disable=protected-access self.assertEqual(self.exporter._exporting, "traces") @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "collector:4317", OTEL_EXPORTER_OTLP_TRACES_HEADERS: " key1=value1,KEY2 = value=2", OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: "10", OTEL_EXPORTER_OTLP_TRACES_COMPRESSION: "gzip", }, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin.__init__" ) def test_env_variables(self, mock_exporter_mixin): OTLPSpanExporter() self.assertTrue(len(mock_exporter_mixin.call_args_list) == 1) _, kwargs = mock_exporter_mixin.call_args_list[0] self.assertEqual(kwargs["endpoint"], "collector:4317") self.assertEqual(kwargs["headers"], " key1=value1,KEY2 = value=2") self.assertEqual(kwargs["timeout"], 10) self.assertEqual(kwargs["compression"], Compression.Gzip) self.assertIsNone(kwargs["credentials"]) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "collector:4317", OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE: THIS_DIR + "/fixtures/test.cert", OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE: THIS_DIR + "/fixtures/test-client-cert.pem", OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY: THIS_DIR + "/fixtures/test-client-key.pem", OTEL_EXPORTER_OTLP_TRACES_HEADERS: " key1=value1,KEY2 = value=2", OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: "10", OTEL_EXPORTER_OTLP_TRACES_COMPRESSION: "gzip", }, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin.__init__" ) def test_env_variables_with_client_certificates(self, mock_exporter_mixin): OTLPSpanExporter() self.assertTrue(len(mock_exporter_mixin.call_args_list) == 1) _, kwargs = mock_exporter_mixin.call_args_list[0] self.assertEqual(kwargs["endpoint"], "collector:4317") self.assertEqual(kwargs["headers"], " key1=value1,KEY2 = value=2") self.assertEqual(kwargs["timeout"], 10) self.assertEqual(kwargs["compression"], Compression.Gzip) 
self.assertIsNotNone(kwargs["credentials"]) self.assertIsInstance(kwargs["credentials"], ChannelCredentials) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "collector:4317", OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE: THIS_DIR + "/fixtures/test.cert", OTEL_EXPORTER_OTLP_TRACES_HEADERS: " key1=value1,KEY2 = value=2", OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: "10", OTEL_EXPORTER_OTLP_TRACES_COMPRESSION: "gzip", }, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin.__init__" ) @patch("logging.Logger.error") def test_env_variables_with_only_certificate( self, mock_logger_error, mock_exporter_mixin ): OTLPSpanExporter() self.assertTrue(len(mock_exporter_mixin.call_args_list) == 1) _, kwargs = mock_exporter_mixin.call_args_list[0] self.assertEqual(kwargs["endpoint"], "collector:4317") self.assertEqual(kwargs["headers"], " key1=value1,KEY2 = value=2") self.assertEqual(kwargs["timeout"], 10) self.assertEqual(kwargs["compression"], Compression.Gzip) self.assertIsNotNone(kwargs["credentials"]) self.assertIsInstance(kwargs["credentials"], ChannelCredentials) mock_logger_error.assert_not_called() @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.ssl_channel_credentials" ) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.secure_channel") # pylint: disable=unused-argument def test_no_credentials_error(self, mock_ssl_channel, mock_secure): OTLPSpanExporter(insecure=False) self.assertTrue(mock_ssl_channel.called) @patch.dict( "os.environ", {OTEL_EXPORTER_OTLP_TRACES_HEADERS: " key1=value1,KEY2 = VALUE=2 "}, ) @patch( "opentelemetry.exporter.otlp.proto.grpc.exporter.ssl_channel_credentials" ) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.secure_channel") # pylint: disable=unused-argument def test_otlp_headers_from_env(self, mock_ssl_channel, mock_secure): exporter = OTLPSpanExporter() # pylint: disable=protected-access self.assertEqual( exporter._headers, ( ("key1", "value1"), ("key2", "VALUE=2"), ), ) exporter = OTLPSpanExporter( headers=(("key3", "value3"), ("key4", "value4")) ) # pylint: disable=protected-access self.assertEqual( exporter._headers, ( ("key3", "value3"), ("key4", "value4"), ), ) exporter = OTLPSpanExporter( headers={"key5": "value5", "key6": "value6"} ) # pylint: disable=protected-access self.assertEqual( exporter._headers, ( ("key5", "value5"), ("key6", "value6"), ), ) @patch.dict( "os.environ", {OTEL_EXPORTER_OTLP_TRACES_INSECURE: "True"}, ) @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.insecure_channel") # pylint: disable=unused-argument def test_otlp_insecure_from_env(self, mock_insecure): OTLPSpanExporter() # pylint: disable=protected-access self.assertTrue(mock_insecure.called) self.assertEqual( 1, mock_insecure.call_count, f"expected {mock_insecure} to be called", ) # pylint: disable=no-self-use @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.insecure_channel") @patch.dict("os.environ", {OTEL_EXPORTER_OTLP_COMPRESSION: "gzip"}) def test_otlp_exporter_otlp_compression_kwarg(self, mock_insecure_channel): """Specifying kwarg should take precedence over env""" OTLPSpanExporter(insecure=True, compression=Compression.NoCompression) mock_insecure_channel.assert_called_once_with( "localhost:4317", compression=Compression.NoCompression, options=( ( "grpc.primary_user_agent", "OTel-OTLP-Exporter-Python/" + __version__, ), ), ) # pylint: disable=no-self-use @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.insecure_channel") @patch.dict( "os.environ", {OTEL_EXPORTER_OTLP_TRACES_COMPRESSION: "gzip"}, ) def 
test_otlp_exporter_otlp_compression_precendence( self, mock_insecure_channel ): """OTEL_EXPORTER_OTLP_TRACES_COMPRESSION as higher priority than OTEL_EXPORTER_OTLP_COMPRESSION """ OTLPSpanExporter(insecure=True) mock_insecure_channel.assert_called_once_with( "localhost:4317", compression=Compression.Gzip, options=( ( "grpc.primary_user_agent", "OTel-OTLP-Exporter-Python/" + __version__, ), ), ) # pylint: disable=no-self-use @patch("opentelemetry.exporter.otlp.proto.grpc.exporter.insecure_channel") def test_otlp_exporter_otlp_channel_options_kwarg( self, mock_insecure_channel ): OTLPSpanExporter(insecure=True, channel_options=(("some", "options"),)) mock_insecure_channel.assert_called_once_with( "localhost:4317", compression=Compression.NoCompression, options=( ( "grpc.primary_user_agent", "OTel-OTLP-Exporter-Python/" + __version__, ), ("some", "options"), ), ) def test_translate_spans(self): expected = ExportTraceServiceRequest( resource_spans=[ ResourceSpans( resource=OTLPResource( attributes=[ KeyValue(key="a", value=AnyValue(int_value=1)), KeyValue( key="b", value=AnyValue(bool_value=False) ), ] ), scope_spans=[ ScopeSpans( scope=PB2InstrumentationScope( name="name", version="version" ), spans=[ OTLPSpan( # pylint: disable=no-member name="a", start_time_unix_nano=self.span.start_time, end_time_unix_nano=self.span.end_time, trace_state="a=b,c=d", span_id=int.to_bytes( 10217189687419569865, 8, "big" ), trace_id=int.to_bytes( 67545097771067222548457157018666467027, 16, "big", ), parent_span_id=( b"\000\000\000\000\000\00009" ), kind=( OTLPSpan.SpanKind.SPAN_KIND_INTERNAL ), attributes=[ KeyValue( key="a", value=AnyValue(int_value=1), ), KeyValue( key="b", value=AnyValue(bool_value=True), ), ], events=[ OTLPSpan.Event( name="a", time_unix_nano=1591240820506462784, attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=False ), ), ], ) ], status=Status(code=0, message=""), links=[ OTLPSpan.Link( trace_id=int.to_bytes( 1, 16, "big" ), span_id=int.to_bytes(2, 8, "big"), attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=False ), ), ], flags=0x300, ) ], flags=0x300, ) ], ) ], ), ] ) # pylint: disable=protected-access self.assertEqual(expected, self.exporter._translate_data([self.span])) def test_translate_spans_multi(self): expected = ExportTraceServiceRequest( resource_spans=[ ResourceSpans( resource=OTLPResource( attributes=[ KeyValue(key="a", value=AnyValue(int_value=1)), KeyValue( key="b", value=AnyValue(bool_value=False) ), ] ), scope_spans=[ ScopeSpans( scope=PB2InstrumentationScope( name="name", version="version" ), spans=[ OTLPSpan( # pylint: disable=no-member name="a", start_time_unix_nano=self.span.start_time, end_time_unix_nano=self.span.end_time, trace_state="a=b,c=d", span_id=int.to_bytes( 10217189687419569865, 8, "big" ), trace_id=int.to_bytes( 67545097771067222548457157018666467027, 16, "big", ), parent_span_id=( b"\000\000\000\000\000\00009" ), kind=( OTLPSpan.SpanKind.SPAN_KIND_INTERNAL ), attributes=[ KeyValue( key="a", value=AnyValue(int_value=1), ), KeyValue( key="b", value=AnyValue(bool_value=True), ), ], events=[ OTLPSpan.Event( name="a", time_unix_nano=1591240820506462784, attributes=[ KeyValue( key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=False ), ), ], ) ], status=Status(code=0, message=""), links=[ OTLPSpan.Link( trace_id=int.to_bytes( 1, 16, "big" ), span_id=int.to_bytes(2, 8, "big"), attributes=[ KeyValue( 
key="a", value=AnyValue( int_value=1 ), ), KeyValue( key="b", value=AnyValue( bool_value=False ), ), ], flags=0x300, ) ], flags=0x300, ) ], ), ScopeSpans( scope=PB2InstrumentationScope( name="name2", version="version2" ), spans=[ OTLPSpan( # pylint: disable=no-member name="c", start_time_unix_nano=self.span3.start_time, end_time_unix_nano=self.span3.end_time, trace_state="a=b,c=d", span_id=int.to_bytes( 10217189687419569865, 8, "big" ), trace_id=int.to_bytes( 67545097771067222548457157018666467027, 16, "big", ), parent_span_id=( b"\000\000\000\000\000\00009" ), kind=( OTLPSpan.SpanKind.SPAN_KIND_INTERNAL ), status=Status(code=0, message=""), flags=0x300, ) ], ), ], ), ResourceSpans( resource=OTLPResource( attributes=[ KeyValue(key="a", value=AnyValue(int_value=2)), KeyValue( key="b", value=AnyValue(bool_value=False) ), ] ), scope_spans=[ ScopeSpans( scope=PB2InstrumentationScope( name="name", version="version" ), spans=[ OTLPSpan( # pylint: disable=no-member name="b", start_time_unix_nano=self.span2.start_time, end_time_unix_nano=self.span2.end_time, trace_state="a=b,c=d", span_id=int.to_bytes( 10217189687419569865, 8, "big" ), trace_id=int.to_bytes( 67545097771067222548457157018666467027, 16, "big", ), parent_span_id=( b"\000\000\000\000\000\00009" ), kind=( OTLPSpan.SpanKind.SPAN_KIND_INTERNAL ), status=Status(code=0, message=""), flags=0x300, ) ], ) ], ), ] ) # pylint: disable=protected-access self.assertEqual( expected, self.exporter._translate_data([self.span, self.span2, self.span3]), ) def _check_translated_status( self, translated: ExportTraceServiceRequest, code_expected: Status, ): status = translated.resource_spans[0].scope_spans[0].spans[0].status self.assertEqual( status.code, code_expected, ) def test_span_status_translate(self): # pylint: disable=protected-access,no-member unset = SDKStatus(status_code=SDKStatusCode.UNSET) ok = SDKStatus(status_code=SDKStatusCode.OK) error = SDKStatus(status_code=SDKStatusCode.ERROR) unset_translated = self.exporter._translate_data( [_create_span_with_status(unset)] ) ok_translated = self.exporter._translate_data( [_create_span_with_status(ok)] ) error_translated = self.exporter._translate_data( [_create_span_with_status(error)] ) self._check_translated_status( unset_translated, Status.STATUS_CODE_UNSET, ) self._check_translated_status( ok_translated, Status.STATUS_CODE_OK, ) self._check_translated_status( error_translated, Status.STATUS_CODE_ERROR, ) # pylint:disable=no-member def test_translate_key_values(self): bool_value = _encode_key_value("bool_type", False) self.assertTrue(isinstance(bool_value, KeyValue)) self.assertEqual(bool_value.key, "bool_type") self.assertTrue(isinstance(bool_value.value, AnyValue)) self.assertFalse(bool_value.value.bool_value) str_value = _encode_key_value("str_type", "str") self.assertTrue(isinstance(str_value, KeyValue)) self.assertEqual(str_value.key, "str_type") self.assertTrue(isinstance(str_value.value, AnyValue)) self.assertEqual(str_value.value.string_value, "str") int_value = _encode_key_value("int_type", 2) self.assertTrue(isinstance(int_value, KeyValue)) self.assertEqual(int_value.key, "int_type") self.assertTrue(isinstance(int_value.value, AnyValue)) self.assertEqual(int_value.value.int_value, 2) double_value = _encode_key_value("double_type", 3.2) self.assertTrue(isinstance(double_value, KeyValue)) self.assertEqual(double_value.key, "double_type") self.assertTrue(isinstance(double_value.value, AnyValue)) self.assertEqual(double_value.value.double_value, 3.2) seq_value = 
_encode_key_value("seq_type", ["asd", "123"]) self.assertTrue(isinstance(seq_value, KeyValue)) self.assertEqual(seq_value.key, "seq_type") self.assertTrue(isinstance(seq_value.value, AnyValue)) self.assertTrue(isinstance(seq_value.value.array_value, ArrayValue)) arr_value = seq_value.value.array_value self.assertTrue(isinstance(arr_value.values[0], AnyValue)) self.assertEqual(arr_value.values[0].string_value, "asd") self.assertTrue(isinstance(arr_value.values[1], AnyValue)) self.assertEqual(arr_value.values[1].string_value, "123") def test_dropped_values(self): span = get_span_with_dropped_attributes_events_links() # pylint:disable=protected-access translated = self.exporter._translate_data([span]) self.assertEqual( 1, translated.resource_spans[0] .scope_spans[0] .spans[0] .dropped_links_count, ) self.assertEqual( 2, translated.resource_spans[0] .scope_spans[0] .spans[0] .dropped_attributes_count, ) self.assertEqual( 3, translated.resource_spans[0] .scope_spans[0] .spans[0] .dropped_events_count, ) self.assertEqual( 2, translated.resource_spans[0] .scope_spans[0] .spans[0] .links[0] .dropped_attributes_count, ) self.assertEqual( 2, translated.resource_spans[0] .scope_spans[0] .spans[0] .events[0] .dropped_attributes_count, ) def _create_span_with_status(status: SDKStatus): span = _Span( "a", context=Mock( **{ "trace_state": {"a": "b", "c": "d"}, "span_id": 10217189687419569865, "trace_id": 67545097771067222548457157018666467027, } ), parent=Mock(**{"span_id": 12345}), instrumentation_scope=InstrumentationScope( name="name", version="version" ), ) span.set_status(status) return span python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/000077500000000000000000000000001511654350100275255ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/LICENSE000066400000000000000000000261351511654350100305410ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/README.rst000066400000000000000000000016041511654350100312150ustar00rootroot00000000000000OpenTelemetry Collector Protobuf over HTTP Exporter =================================================== |pypi| .. |pypi| image:: https://badge.fury.io/py/opentelemetry-exporter-otlp-proto-http.svg :target: https://pypi.org/project/opentelemetry-exporter-otlp-proto-http/ This library allows exporting data to the OpenTelemetry Collector using the OpenTelemetry Protocol (OTLP) with Protobuf over HTTP.
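A minimal usage sketch (the wiring below is illustrative; with no further configuration the exporter posts to the default endpoint ``http://localhost:4318/v1/traces``):

.. code:: python

    from opentelemetry import trace
    from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor

    # Batch spans in the background and send them over Protobuf/HTTP.
    provider = TracerProvider()
    provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
    trace.set_tracer_provider(provider)

    with trace.get_tracer(__name__).start_as_current_span("example-span"):
        pass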
Installation ------------ :: pip install opentelemetry-exporter-otlp-proto-http References ---------- * `OpenTelemetry Collector Exporter `_ * `OpenTelemetry Collector `_ * `OpenTelemetry `_ * `OpenTelemetry Protocol Specification `_ python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/pyproject.toml000066400000000000000000000041251511654350100324430ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-exporter-otlp-proto-http" dynamic = ["version"] description = "OpenTelemetry Collector Protobuf over HTTP Exporter" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Framework :: OpenTelemetry :: Exporters", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ] dependencies = [ "googleapis-common-protos ~= 1.52", "opentelemetry-api ~= 1.15", "opentelemetry-proto == 1.39.1", "opentelemetry-sdk ~= 1.39.1", "opentelemetry-exporter-otlp-proto-common == 1.39.1", "requests ~= 2.7", "typing-extensions >= 4.5.0", ] [project.entry-points.opentelemetry_traces_exporter] otlp_proto_http = "opentelemetry.exporter.otlp.proto.http.trace_exporter:OTLPSpanExporter" [project.entry-points.opentelemetry_metrics_exporter] otlp_proto_http = "opentelemetry.exporter.otlp.proto.http.metric_exporter:OTLPMetricExporter" [project.entry-points.opentelemetry_logs_exporter] otlp_proto_http = "opentelemetry.exporter.otlp.proto.http._log_exporter:OTLPLogExporter" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-otlp-proto-http" Repository = "https://github.com/open-telemetry/opentelemetry-python" [project.optional-dependencies] gcp-auth = [ "opentelemetry-exporter-credential-provider-gcp >= 0.59b0", ] [tool.hatch.version] path = "src/opentelemetry/exporter/otlp/proto/http/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] 
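The ``[project.entry-points.*]`` tables above register this exporter under the name ``otlp_proto_http``, which is how the SDK resolves it when, for example, ``OTEL_TRACES_EXPORTER=otlp_proto_http`` is set; the credential-provider lookup in ``_common`` below relies on the same mechanism. A minimal sketch of that lookup, assuming this package and the SDK are installed (variable names are illustrative):

# A minimal sketch of resolving the exporter through its declared entry point.
from opentelemetry.util._importlib_metadata import entry_points

# "opentelemetry_traces_exporter" / "otlp_proto_http" match the tables above.
(entry_point,) = entry_points(
    group="opentelemetry_traces_exporter", name="otlp_proto_http"
)
exporter = entry_point.load()()  # loads and instantiates OTLPSpanExporter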
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/000077500000000000000000000000001511654350100303145ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/000077500000000000000000000000001511654350100332105ustar00rootroot00000000000000exporter/000077500000000000000000000000001511654350100350015ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetryotlp/000077500000000000000000000000001511654350100357575ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporterproto/000077500000000000000000000000001511654350100371225ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlphttp/000077500000000000000000000000001511654350100401015ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto__init__.py000066400000000000000000000051721511654350100422170ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This library allows exporting tracing data to an OTLP collector. Usage ----- The **OTLP Span Exporter** allows exporting `OpenTelemetry`_ traces to the `OTLP`_ collector. You can configure the exporter with the following environment variables: - :envvar:`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` - :envvar:`OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` - :envvar:`OTEL_EXPORTER_OTLP_TRACES_HEADERS` - :envvar:`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` - :envvar:`OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` - :envvar:`OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` - :envvar:`OTEL_EXPORTER_OTLP_TIMEOUT` - :envvar:`OTEL_EXPORTER_OTLP_PROTOCOL` - :envvar:`OTEL_EXPORTER_OTLP_HEADERS` - :envvar:`OTEL_EXPORTER_OTLP_ENDPOINT` - :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` - :envvar:`OTEL_EXPORTER_OTLP_CERTIFICATE` .. _OTLP: https://github.com/open-telemetry/opentelemetry-collector/ .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ .. code:: python from opentelemetry import trace from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor # Resource can be required for some backends, e.g.
Jaeger # If the resource is not set, traces will not appear in Jaeger resource = Resource(attributes={ "service.name": "service" }) trace.set_tracer_provider(TracerProvider(resource=resource)) tracer = trace.get_tracer(__name__) otlp_exporter = OTLPSpanExporter() span_processor = BatchSpanProcessor(otlp_exporter) trace.get_tracer_provider().add_span_processor(span_processor) with tracer.start_as_current_span("foo"): print("Hello world!") API --- """ import enum from .version import __version__ _OTLP_HTTP_HEADERS = { "Content-Type": "application/x-protobuf", "User-Agent": "OTel-OTLP-Exporter-Python/" + __version__, } class Compression(enum.Enum): NoCompression = "none" Deflate = "deflate" Gzip = "gzip" _common/000077500000000000000000000000001511654350100415305ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http__init__.py000066400000000000000000000047131511654350100436460ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/_common# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from os import environ from typing import Literal, Optional import requests from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_CREDENTIAL_PROVIDER, _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER, _OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER, _OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER, ) from opentelemetry.util._importlib_metadata import entry_points def _is_retryable(resp: requests.Response) -> bool: if resp.status_code == 408: return True if resp.status_code >= 500 and resp.status_code <= 599: return True return False def _load_session_from_envvar( cred_envvar: Literal[ _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER, _OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER, _OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER, ], ) -> Optional[requests.Session]: _credential_env = environ.get( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_CREDENTIAL_PROVIDER ) or environ.get(cred_envvar) if _credential_env: try: maybe_session = next( iter( entry_points( group="opentelemetry_otlp_credential_provider", name=_credential_env, ) ) ).load()() except StopIteration: raise RuntimeError( f"Requested component '{_credential_env}' not found in " f"entry point 'opentelemetry_otlp_credential_provider'" ) if isinstance(maybe_session, requests.Session): return maybe_session else: raise RuntimeError( f"Requested component '{_credential_env}' is of type {type(maybe_session)}" f" but must be of type `requests.Session`."
) return None _log_exporter/000077500000000000000000000000001511654350100427515ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http__init__.py000066400000000000000000000216111511654350100450630ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/_log_exporter# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gzip import logging import random import threading import zlib from io import BytesIO from os import environ from time import time from typing import Dict, Optional, Sequence import requests from requests.exceptions import ConnectionError from opentelemetry.exporter.otlp.proto.common._log_encoder import encode_logs from opentelemetry.exporter.otlp.proto.http import ( _OTLP_HTTP_HEADERS, Compression, ) from opentelemetry.exporter.otlp.proto.http._common import ( _is_retryable, _load_session_from_envvar, ) from opentelemetry.sdk._logs import ReadableLogRecord from opentelemetry.sdk._logs.export import ( LogRecordExporter, LogRecordExportResult, ) from opentelemetry.sdk._shared_internal import DuplicateFilter from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_HEADERS, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT, OTEL_EXPORTER_OTLP_TIMEOUT, ) from opentelemetry.util.re import parse_env_headers _logger = logging.getLogger(__name__) # This prevents a log that fails to be written from generating another log that also fails to be written, and so on.
_logger.addFilter(DuplicateFilter()) DEFAULT_COMPRESSION = Compression.NoCompression DEFAULT_ENDPOINT = "http://localhost:4318/" DEFAULT_LOGS_EXPORT_PATH = "v1/logs" DEFAULT_TIMEOUT = 10 # in seconds _MAX_RETRYS = 6 class OTLPLogExporter(LogRecordExporter): def __init__( self, endpoint: Optional[str] = None, certificate_file: Optional[str] = None, client_key_file: Optional[str] = None, client_certificate_file: Optional[str] = None, headers: Optional[Dict[str, str]] = None, timeout: Optional[float] = None, compression: Optional[Compression] = None, session: Optional[requests.Session] = None, ): self._shutdown_is_occuring = threading.Event() self._endpoint = endpoint or environ.get( OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, _append_logs_path( environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT) ), ) # Keeping these as instance variables because they are used in tests self._certificate_file = certificate_file or environ.get( OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE, environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True), ) self._client_key_file = client_key_file or environ.get( OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY, environ.get(OTEL_EXPORTER_OTLP_CLIENT_KEY, None), ) self._client_certificate_file = client_certificate_file or environ.get( OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE, environ.get(OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, None), ) self._client_cert = ( (self._client_certificate_file, self._client_key_file) if self._client_certificate_file and self._client_key_file else self._client_certificate_file ) headers_string = environ.get( OTEL_EXPORTER_OTLP_LOGS_HEADERS, environ.get(OTEL_EXPORTER_OTLP_HEADERS, ""), ) self._headers = headers or parse_env_headers( headers_string, liberal=True ) self._timeout = timeout or float( environ.get( OTEL_EXPORTER_OTLP_LOGS_TIMEOUT, environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT), ) ) self._compression = compression or _compression_from_env() self._session = ( session or _load_session_from_envvar( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER ) or requests.Session() ) self._session.headers.update(self._headers) self._session.headers.update(_OTLP_HTTP_HEADERS) # let users override our defaults self._session.headers.update(self._headers) if self._compression is not Compression.NoCompression: self._session.headers.update( {"Content-Encoding": self._compression.value} ) self._shutdown = False def _export( self, serialized_data: bytes, timeout_sec: Optional[float] = None ): data = serialized_data if self._compression == Compression.Gzip: gzip_data = BytesIO() with gzip.GzipFile(fileobj=gzip_data, mode="w") as gzip_stream: gzip_stream.write(serialized_data) data = gzip_data.getvalue() elif self._compression == Compression.Deflate: data = zlib.compress(serialized_data) if timeout_sec is None: timeout_sec = self._timeout # By default, keep-alive is enabled in Session's request # headers. Backends may choose to close the connection # while a post happens which causes an unhandled # exception. 
This try/except will retry the post on such exceptions try: resp = self._session.post( url=self._endpoint, data=data, verify=self._certificate_file, timeout=timeout_sec, cert=self._client_cert, ) except ConnectionError: resp = self._session.post( url=self._endpoint, data=data, verify=self._certificate_file, timeout=timeout_sec, cert=self._client_cert, ) return resp def export( self, batch: Sequence[ReadableLogRecord] ) -> LogRecordExportResult: if self._shutdown: _logger.warning("Exporter already shutdown, ignoring batch") return LogRecordExportResult.FAILURE serialized_data = encode_logs(batch).SerializeToString() deadline_sec = time() + self._timeout for retry_num in range(_MAX_RETRYS): resp = self._export(serialized_data, deadline_sec - time()) if resp.ok: return LogRecordExportResult.SUCCESS # multiplying by a random number between .8 and 1.2 introduces a +/-20% jitter to each backoff. backoff_seconds = 2**retry_num * random.uniform(0.8, 1.2) if ( not _is_retryable(resp) or retry_num + 1 == _MAX_RETRYS or backoff_seconds > (deadline_sec - time()) or self._shutdown ): _logger.error( "Failed to export logs batch code: %s, reason: %s", resp.status_code, resp.text, ) return LogRecordExportResult.FAILURE _logger.warning( "Transient error %s encountered while exporting logs batch, retrying in %.2fs.", resp.reason, backoff_seconds, ) shutdown = self._shutdown_is_occuring.wait(backoff_seconds) if shutdown: _logger.warning("Shutdown in progress, aborting retry.") break return LogRecordExportResult.FAILURE def force_flush(self, timeout_millis: float = 10_000) -> bool: """Nothing is buffered in this exporter, so this method does nothing.""" return True def shutdown(self): if self._shutdown: _logger.warning("Exporter already shutdown, ignoring call") return self._shutdown = True self._shutdown_is_occuring.set() self._session.close() def _compression_from_env() -> Compression: compression = ( environ.get( OTEL_EXPORTER_OTLP_LOGS_COMPRESSION, environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, "none"), ) .lower() .strip() ) return Compression(compression) def _append_logs_path(endpoint: str) -> str: if endpoint.endswith("/"): return endpoint + DEFAULT_LOGS_EXPORT_PATH return endpoint + f"/{DEFAULT_LOGS_EXPORT_PATH}" metric_exporter/000077500000000000000000000000001511654350100433145ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http__init__.py000066400000000000000000000251561511654350100454360ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/metric_exporter# Copyright The OpenTelemetry Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
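"""OTLP metric exporter over Protobuf/HTTP.

A minimal wiring sketch, assuming the OpenTelemetry SDK is installed; the
meter and instrument names are illustrative, not part of this module:

.. code:: python

    from opentelemetry.exporter.otlp.proto.http.metric_exporter import (
        OTLPMetricExporter,
    )
    from opentelemetry.sdk.metrics import MeterProvider
    from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader

    # Periodically collect metrics and push them through this exporter.
    reader = PeriodicExportingMetricReader(OTLPMetricExporter())
    provider = MeterProvider(metric_readers=[reader])
    counter = provider.get_meter("example-meter").create_counter("requests")
    counter.add(1)
"""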
from __future__ import annotations import gzip import logging import random import threading import zlib from io import BytesIO from os import environ from time import time from typing import ( # noqa: F401 Any, Callable, Dict, List, Mapping, Optional, Sequence, ) import requests from requests.exceptions import ConnectionError from typing_extensions import deprecated from opentelemetry.exporter.otlp.proto.common._internal import ( _get_resource_data, ) from opentelemetry.exporter.otlp.proto.common._internal.metrics_encoder import ( OTLPMetricExporterMixin, ) from opentelemetry.exporter.otlp.proto.common.metrics_encoder import ( encode_metrics, ) from opentelemetry.exporter.otlp.proto.http import ( _OTLP_HTTP_HEADERS, Compression, ) from opentelemetry.exporter.otlp.proto.http._common import ( _is_retryable, _load_session_from_envvar, ) from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ( # noqa: F401 ExportMetricsServiceRequest, ) from opentelemetry.proto.common.v1.common_pb2 import ( # noqa: F401 AnyValue, ArrayValue, InstrumentationScope, KeyValue, KeyValueList, ) from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2 # noqa: F401 from opentelemetry.proto.resource.v1.resource_pb2 import Resource # noqa: F401 from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as PB2Resource, ) from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_HEADERS, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, OTEL_EXPORTER_OTLP_TIMEOUT, ) from opentelemetry.sdk.metrics._internal.aggregation import Aggregation from opentelemetry.sdk.metrics.export import ( # noqa: F401 AggregationTemporality, Gauge, MetricExporter, MetricExportResult, MetricsData, Sum, ) from opentelemetry.sdk.metrics.export import ( # noqa: F401 Histogram as HistogramType, ) from opentelemetry.sdk.resources import Resource as SDKResource from opentelemetry.util.re import parse_env_headers _logger = logging.getLogger(__name__) DEFAULT_COMPRESSION = Compression.NoCompression DEFAULT_ENDPOINT = "http://localhost:4318/" DEFAULT_METRICS_EXPORT_PATH = "v1/metrics" DEFAULT_TIMEOUT = 10 # in seconds _MAX_RETRYS = 6 class OTLPMetricExporter(MetricExporter, OTLPMetricExporterMixin): def __init__( self, endpoint: str | None = None, certificate_file: str | None = None, client_key_file: str | None = None, client_certificate_file: str | None = None, headers: dict[str, str] | None = None, timeout: float | None = None, compression: Compression | None = None, session: requests.Session | None = None, preferred_temporality: dict[type, AggregationTemporality] | None = None, preferred_aggregation: dict[type, Aggregation] | None = None, ): self._shutdown_in_progress = threading.Event() self._endpoint = endpoint or environ.get( OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, _append_metrics_path( environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT) ), ) self._certificate_file = certificate_file or environ.get( OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE, environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True), ) self._client_key_file = client_key_file or environ.get( 
OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY, environ.get(OTEL_EXPORTER_OTLP_CLIENT_KEY, None), ) self._client_certificate_file = client_certificate_file or environ.get( OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE, environ.get(OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, None), ) self._client_cert = ( (self._client_certificate_file, self._client_key_file) if self._client_certificate_file and self._client_key_file else self._client_certificate_file ) headers_string = environ.get( OTEL_EXPORTER_OTLP_METRICS_HEADERS, environ.get(OTEL_EXPORTER_OTLP_HEADERS, ""), ) self._headers = headers or parse_env_headers( headers_string, liberal=True ) self._timeout = timeout or float( environ.get( OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT), ) ) self._compression = compression or _compression_from_env() self._session = ( session or _load_session_from_envvar( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER ) or requests.Session() ) self._session.headers.update(self._headers) self._session.headers.update(_OTLP_HTTP_HEADERS) # let users override our defaults self._session.headers.update(self._headers) if self._compression is not Compression.NoCompression: self._session.headers.update( {"Content-Encoding": self._compression.value} ) self._common_configuration( preferred_temporality, preferred_aggregation ) self._shutdown = False def _export( self, serialized_data: bytes, timeout_sec: Optional[float] = None ): data = serialized_data if self._compression == Compression.Gzip: gzip_data = BytesIO() with gzip.GzipFile(fileobj=gzip_data, mode="w") as gzip_stream: gzip_stream.write(serialized_data) data = gzip_data.getvalue() elif self._compression == Compression.Deflate: data = zlib.compress(serialized_data) if timeout_sec is None: timeout_sec = self._timeout # By default, keep-alive is enabled in Session's request # headers. Backends may choose to close the connection # while a post happens which causes an unhandled # exception. This try/except will retry the post on such exceptions try: resp = self._session.post( url=self._endpoint, data=data, verify=self._certificate_file, timeout=timeout_sec, cert=self._client_cert, ) except ConnectionError: resp = self._session.post( url=self._endpoint, data=data, verify=self._certificate_file, timeout=timeout_sec, cert=self._client_cert, ) return resp def export( self, metrics_data: MetricsData, timeout_millis: Optional[float] = 10000, **kwargs, ) -> MetricExportResult: if self._shutdown: _logger.warning("Exporter already shutdown, ignoring batch") return MetricExportResult.FAILURE serialized_data = encode_metrics(metrics_data).SerializeToString() deadline_sec = time() + self._timeout for retry_num in range(_MAX_RETRYS): resp = self._export(serialized_data, deadline_sec - time()) if resp.ok: return MetricExportResult.SUCCESS # multiplying by a random number between .8 and 1.2 introduces a +/-20% jitter to each backoff.
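# Worked example of the schedule below: with _MAX_RETRYS = 6 the nominal
# backoff after attempts 0..4 is 1s, 2s, 4s, 8s and 16s (each jittered by
# +/-20%), and the sixth failed attempt returns without sleeping; in
# practice the deadline check cuts retries off once the configured
# timeout (10 seconds by default) is exhausted.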
backoff_seconds = 2**retry_num * random.uniform(0.8, 1.2) if ( not _is_retryable(resp) or retry_num + 1 == _MAX_RETRYS or backoff_seconds > (deadline_sec - time()) or self._shutdown ): _logger.error( "Failed to export metrics batch code: %s, reason: %s", resp.status_code, resp.text, ) return MetricExportResult.FAILURE _logger.warning( "Transient error %s encountered while exporting metrics batch, retrying in %.2fs.", resp.reason, backoff_seconds, ) shutdown = self._shutdown_in_progress.wait(backoff_seconds) if shutdown: _logger.warning("Shutdown in progress, aborting retry.") break return MetricExportResult.FAILURE def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: if self._shutdown: _logger.warning("Exporter already shutdown, ignoring call") return self._shutdown = True self._shutdown_in_progress.set() self._session.close() @property def _exporting(self) -> str: return "metrics" def force_flush(self, timeout_millis: float = 10_000) -> bool: """Nothing is buffered in this exporter, so this method does nothing.""" return True @deprecated( "Use one of the encoders from opentelemetry-exporter-otlp-proto-common instead. Deprecated since version 1.18.0.", ) def get_resource_data( sdk_resource_scope_data: Dict[SDKResource, Any], # ResourceDataT? resource_class: Callable[..., PB2Resource], name: str, ) -> List[PB2Resource]: return _get_resource_data(sdk_resource_scope_data, resource_class, name) def _compression_from_env() -> Compression: compression = ( environ.get( OTEL_EXPORTER_OTLP_METRICS_COMPRESSION, environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, "none"), ) .lower() .strip() ) return Compression(compression) def _append_metrics_path(endpoint: str) -> str: if endpoint.endswith("/"): return endpoint + DEFAULT_METRICS_EXPORT_PATH return endpoint + f"/{DEFAULT_METRICS_EXPORT_PATH}" py.typed000066400000000000000000000000001511654350100415660ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/httptrace_exporter/000077500000000000000000000000001511654350100431275ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http__init__.py000066400000000000000000000211141511654350100452370ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
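"""OTLP span exporter over Protobuf/HTTP.

Endpoint resolution follows the code below: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
is used verbatim when set; otherwise ``v1/traces`` is appended to
OTEL_EXPORTER_OTLP_ENDPOINT (default ``http://localhost:4318/``). A minimal
sketch, with an illustrative collector address:

.. code:: python

    import os

    os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "http://collector:4318"

    from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
        OTLPSpanExporter,
    )

    # The environment is read at construction time, so this exporter
    # posts to http://collector:4318/v1/traces.
    exporter = OTLPSpanExporter()
"""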
import gzip import logging import random import threading import zlib from io import BytesIO from os import environ from time import time from typing import Dict, Optional, Sequence import requests from requests.exceptions import ConnectionError from opentelemetry.exporter.otlp.proto.common.trace_encoder import ( encode_spans, ) from opentelemetry.exporter.otlp.proto.http import ( _OTLP_HTTP_HEADERS, Compression, ) from opentelemetry.exporter.otlp.proto.http._common import ( _is_retryable, _load_session_from_envvar, ) from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_HEADERS, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, ) from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult from opentelemetry.util.re import parse_env_headers _logger = logging.getLogger(__name__) DEFAULT_COMPRESSION = Compression.NoCompression DEFAULT_ENDPOINT = "http://localhost:4318/" DEFAULT_TRACES_EXPORT_PATH = "v1/traces" DEFAULT_TIMEOUT = 10 # in seconds _MAX_RETRYS = 6 class OTLPSpanExporter(SpanExporter): def __init__( self, endpoint: Optional[str] = None, certificate_file: Optional[str] = None, client_key_file: Optional[str] = None, client_certificate_file: Optional[str] = None, headers: Optional[Dict[str, str]] = None, timeout: Optional[float] = None, compression: Optional[Compression] = None, session: Optional[requests.Session] = None, ): self._shutdown_in_progress = threading.Event() self._endpoint = endpoint or environ.get( OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, _append_trace_path( environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT) ), ) self._certificate_file = certificate_file or environ.get( OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE, environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True), ) self._client_key_file = client_key_file or environ.get( OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY, environ.get(OTEL_EXPORTER_OTLP_CLIENT_KEY, None), ) self._client_certificate_file = client_certificate_file or environ.get( OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE, environ.get(OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, None), ) self._client_cert = ( (self._client_certificate_file, self._client_key_file) if self._client_certificate_file and self._client_key_file else self._client_certificate_file ) headers_string = environ.get( OTEL_EXPORTER_OTLP_TRACES_HEADERS, environ.get(OTEL_EXPORTER_OTLP_HEADERS, ""), ) self._headers = headers or parse_env_headers( headers_string, liberal=True ) self._timeout = timeout or float( environ.get( OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT), ) ) self._compression = compression or _compression_from_env() self._session = ( session or _load_session_from_envvar( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER ) or requests.Session() ) self._session.headers.update(self._headers) self._session.headers.update(_OTLP_HTTP_HEADERS) # let users override our defaults self._session.headers.update(self._headers) if self._compression is not Compression.NoCompression: self._session.headers.update( 
{"Content-Encoding": self._compression.value} ) self._shutdown = False def _export( self, serialized_data: bytes, timeout_sec: Optional[float] = None ): data = serialized_data if self._compression == Compression.Gzip: gzip_data = BytesIO() with gzip.GzipFile(fileobj=gzip_data, mode="w") as gzip_stream: gzip_stream.write(serialized_data) data = gzip_data.getvalue() elif self._compression == Compression.Deflate: data = zlib.compress(serialized_data) if timeout_sec is None: timeout_sec = self._timeout # By default, keep-alive is enabled in Session's request # headers. Backends may choose to close the connection # while a post happens which causes an unhandled # exception. This try/except will retry the post on such exceptions try: resp = self._session.post( url=self._endpoint, data=data, verify=self._certificate_file, timeout=timeout_sec, cert=self._client_cert, ) except ConnectionError: resp = self._session.post( url=self._endpoint, data=data, verify=self._certificate_file, timeout=timeout_sec, cert=self._client_cert, ) return resp def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult: if self._shutdown: _logger.warning("Exporter already shutdown, ignoring batch") return SpanExportResult.FAILURE serialized_data = encode_spans(spans).SerializePartialToString() deadline_sec = time() + self._timeout for retry_num in range(_MAX_RETRYS): resp = self._export(serialized_data, deadline_sec - time()) if resp.ok: return SpanExportResult.SUCCESS # multiplying by a random number between .8 and 1.2 introduces a +/20% jitter to each backoff. backoff_seconds = 2**retry_num * random.uniform(0.8, 1.2) if ( not _is_retryable(resp) or retry_num + 1 == _MAX_RETRYS or backoff_seconds > (deadline_sec - time()) or self._shutdown ): _logger.error( "Failed to export span batch code: %s, reason: %s", resp.status_code, resp.text, ) return SpanExportResult.FAILURE _logger.warning( "Transient error %s encountered while exporting span batch, retrying in %.2fs.", resp.reason, backoff_seconds, ) shutdown = self._shutdown_in_progress.wait(backoff_seconds) if shutdown: _logger.warning("Shutdown in progress, aborting retry.") break return SpanExportResult.FAILURE def shutdown(self): if self._shutdown: _logger.warning("Exporter already shutdown, ignoring call") return self._shutdown = True self._shutdown_in_progress.set() self._session.close() def force_flush(self, timeout_millis: int = 30000) -> bool: """Nothing is buffered in this exporter, so this method does nothing.""" return True def _compression_from_env() -> Compression: compression = ( environ.get( OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, "none"), ) .lower() .strip() ) return Compression(compression) def _append_trace_path(endpoint: str) -> str: if endpoint.endswith("/"): return endpoint + DEFAULT_TRACES_EXPORT_PATH return endpoint + f"/{DEFAULT_TRACES_EXPORT_PATH}" encoder/000077500000000000000000000000001511654350100445465ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter__init__.py000066400000000000000000000043741511654350100466670ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/encoder# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging # noqa: F401 from collections import abc # noqa: F401 from typing import Any, List, Optional, Sequence # noqa: F401 from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ( # noqa: F401 ExportTraceServiceRequest as PB2ExportTraceServiceRequest, ) from opentelemetry.proto.common.v1.common_pb2 import ( # noqa: F401 AnyValue as PB2AnyValue, ) from opentelemetry.proto.common.v1.common_pb2 import ( # noqa: F401 ArrayValue as PB2ArrayValue, ) from opentelemetry.proto.common.v1.common_pb2 import ( # noqa: F401 InstrumentationScope as PB2InstrumentationScope, ) from opentelemetry.proto.common.v1.common_pb2 import ( # noqa: F401 KeyValue as PB2KeyValue, ) from opentelemetry.proto.resource.v1.resource_pb2 import ( # noqa: F401 Resource as PB2Resource, ) from opentelemetry.proto.trace.v1.trace_pb2 import ( # noqa: F401 ResourceSpans as PB2ResourceSpans, ) from opentelemetry.proto.trace.v1.trace_pb2 import ( # noqa: F401 ScopeSpans as PB2ScopeSpans, ) from opentelemetry.proto.trace.v1.trace_pb2 import ( # noqa: F401 Span as PB2SPan, ) from opentelemetry.proto.trace.v1.trace_pb2 import ( # noqa: F401 Status as PB2Status, ) from opentelemetry.sdk.trace import ( Event, # noqa: F401 Resource, # noqa: F401 ) from opentelemetry.sdk.trace import Span as SDKSpan # noqa: F401 from opentelemetry.sdk.util.instrumentation import ( # noqa: F401 InstrumentationScope, ) from opentelemetry.trace import ( Link, # noqa: F401 SpanKind, # noqa: F401 ) from opentelemetry.trace.span import ( # noqa: F401 SpanContext, Status, TraceState, ) from opentelemetry.util.types import Attributes # noqa: F401 version/000077500000000000000000000000001511654350100415665ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http__init__.py000066400000000000000000000011401511654350100436730ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
__version__ = "1.39.1" python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements.txt000066400000000000000000000011241511654350100337640ustar00rootroot00000000000000asgiref==3.7.2 certifi==2024.7.4 charset-normalizer==3.3.2 googleapis-common-protos==1.63.2 idna==3.7 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 protobuf==5.26.1 py-cpuinfo==9.0.0 pytest==7.4.4 PyYAML==6.0.1 requests==2.32.3 responses==0.24.1 tomli==2.0.1 typing_extensions==4.10.0 urllib3==2.2.2 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e tests/opentelemetry-test-utils -e exporter/opentelemetry-exporter-otlp-proto-common -e opentelemetry-proto -e opentelemetry-sdk -e opentelemetry-semantic-conventions -e exporter/opentelemetry-exporter-otlp-proto-http python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/tests/000077500000000000000000000000001511654350100306675ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/tests/__init__.py000066400000000000000000000000001511654350100327660ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/tests/metrics/000077500000000000000000000000001511654350100323355ustar00rootroot00000000000000__init__.py000066400000000000000000000000001511654350100343550ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/tests/metricstest_otlp_metrics_exporter.py000066400000000000000000000533021511654350100403260ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/tests/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import threading import time from logging import WARNING from os import environ from unittest import TestCase from unittest.mock import ANY, MagicMock, Mock, patch from requests import Session from requests.models import Response from opentelemetry.exporter.otlp.proto.common.metrics_encoder import ( encode_metrics, ) from opentelemetry.exporter.otlp.proto.http import Compression from opentelemetry.exporter.otlp.proto.http.metric_exporter import ( DEFAULT_COMPRESSION, DEFAULT_ENDPOINT, DEFAULT_METRICS_EXPORT_PATH, DEFAULT_TIMEOUT, OTLPMetricExporter, ) from opentelemetry.exporter.otlp.proto.http.version import __version__ from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_HEADERS, OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, OTEL_EXPORTER_OTLP_TIMEOUT, ) from opentelemetry.sdk.metrics import ( Counter, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, ) from opentelemetry.sdk.metrics.export import ( AggregationTemporality, MetricExportResult, MetricsData, ResourceMetrics, ScopeMetrics, ) from opentelemetry.sdk.metrics.view import ( ExplicitBucketHistogramAggregation, ExponentialBucketHistogramAggregation, ) from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.util.instrumentation import ( InstrumentationScope as SDKInstrumentationScope, ) from opentelemetry.test.metrictestutil import _generate_sum from opentelemetry.test.mock_test_classes import IterEntryPoint OS_ENV_ENDPOINT = "os.env.base" OS_ENV_CERTIFICATE = "os/env/base.crt" OS_ENV_CLIENT_CERTIFICATE = "os/env/client-cert.pem" OS_ENV_CLIENT_KEY = "os/env/client-key.pem" OS_ENV_HEADERS = "envHeader1=val1,envHeader2=val2,User-agent=Overridden" OS_ENV_TIMEOUT = "30" # pylint: disable=protected-access class TestOTLPMetricExporter(TestCase): def setUp(self): self.metrics = { "sum_int": MetricsData( resource_metrics=[ ResourceMetrics( resource=Resource( attributes={"a": 1, "b": False}, schema_url="resource_schema_url", ), scope_metrics=[ ScopeMetrics( scope=SDKInstrumentationScope( name="first_name", version="first_version", schema_url="insrumentation_scope_schema_url", ), metrics=[_generate_sum("sum_int", 33)], schema_url="instrumentation_scope_schema_url", ) ], schema_url="resource_schema_url", ) ] ), } def test_constructor_default(self): exporter = OTLPMetricExporter() self.assertEqual( exporter._endpoint, DEFAULT_ENDPOINT + DEFAULT_METRICS_EXPORT_PATH ) self.assertEqual(exporter._certificate_file, True) self.assertEqual(exporter._client_certificate_file, None) self.assertEqual(exporter._client_key_file, None) self.assertEqual(exporter._timeout, DEFAULT_TIMEOUT) self.assertIs(exporter._compression, DEFAULT_COMPRESSION) self.assertEqual(exporter._headers, {}) self.assertIsInstance(exporter._session, Session) self.assertIn("User-Agent", exporter._session.headers) self.assertEqual( exporter._session.headers.get("Content-Type"), "application/x-protobuf", ) self.assertEqual( 
exporter._session.headers.get("User-Agent"), "OTel-OTLP-Exporter-Python/" + __version__, ) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_CERTIFICATE: OS_ENV_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE: OS_ENV_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY: OS_ENV_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION: Compression.Gzip.value, OTEL_EXPORTER_OTLP_ENDPOINT: OS_ENV_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS: OS_ENV_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT: OS_ENV_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE: "metrics/certificate.env", OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE: "metrics/client-cert.pem", OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY: "metrics/client-key.pem", OTEL_EXPORTER_OTLP_METRICS_COMPRESSION: Compression.Deflate.value, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: "https://metrics.endpoint.env", OTEL_EXPORTER_OTLP_METRICS_HEADERS: "metricsEnv1=val1,metricsEnv2=val2,metricEnv3===val3==,User-agent=metrics-user-agent", OTEL_EXPORTER_OTLP_METRICS_TIMEOUT: "40", _OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER: "credential_provider", }, ) @patch("opentelemetry.exporter.otlp.proto.http._common.entry_points") def test_exporter_metrics_env_take_priority(self, mock_entry_points): credential = Session() def f(): return credential mock_entry_points.configure_mock( return_value=[IterEntryPoint("custom_credential", f)] ) exporter = OTLPMetricExporter() self.assertEqual(exporter._endpoint, "https://metrics.endpoint.env") self.assertEqual(exporter._certificate_file, "metrics/certificate.env") self.assertEqual( exporter._client_certificate_file, "metrics/client-cert.pem" ) self.assertEqual(exporter._client_key_file, "metrics/client-key.pem") self.assertEqual(exporter._timeout, 40) self.assertIs(exporter._compression, Compression.Deflate) self.assertEqual( exporter._headers, { "metricsenv1": "val1", "metricsenv2": "val2", "metricenv3": "==val3==", "user-agent": "metrics-user-agent", }, ) self.assertIsInstance(exporter._session, Session) self.assertEqual( exporter._session.headers.get("User-Agent"), "metrics-user-agent", ) self.assertEqual( exporter._session.headers.get("Content-Type"), "application/x-protobuf", ) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_CERTIFICATE: OS_ENV_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE: OS_ENV_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY: OS_ENV_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION: Compression.Gzip.value, OTEL_EXPORTER_OTLP_ENDPOINT: OS_ENV_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: "https://metrics.endpoint.env", OTEL_EXPORTER_OTLP_HEADERS: OS_ENV_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT: OS_ENV_TIMEOUT, }, ) def test_exporter_constructor_take_priority(self): exporter = OTLPMetricExporter( endpoint="example.com/1234", certificate_file="path/to/service.crt", client_key_file="path/to/client-key.pem", client_certificate_file="path/to/client-cert.pem", headers={"testHeader1": "value1", "testHeader2": "value2"}, timeout=20, compression=Compression.NoCompression, session=Session(), ) self.assertEqual(exporter._endpoint, "example.com/1234") self.assertEqual(exporter._certificate_file, "path/to/service.crt") self.assertEqual( exporter._client_certificate_file, "path/to/client-cert.pem" ) self.assertEqual(exporter._client_key_file, "path/to/client-key.pem") self.assertEqual(exporter._timeout, 20) self.assertIs(exporter._compression, Compression.NoCompression) self.assertEqual( exporter._headers, {"testHeader1": "value1", "testHeader2": "value2"}, ) self.assertIsInstance(exporter._session, Session) @patch.dict( "os.environ", { 
OTEL_EXPORTER_OTLP_CERTIFICATE: OS_ENV_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE: OS_ENV_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY: OS_ENV_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION: Compression.Gzip.value, OTEL_EXPORTER_OTLP_HEADERS: OS_ENV_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT: OS_ENV_TIMEOUT, }, ) def test_exporter_env(self): exporter = OTLPMetricExporter() self.assertEqual(exporter._certificate_file, OS_ENV_CERTIFICATE) self.assertEqual( exporter._client_certificate_file, OS_ENV_CLIENT_CERTIFICATE ) self.assertEqual(exporter._client_key_file, OS_ENV_CLIENT_KEY) self.assertEqual(exporter._timeout, int(OS_ENV_TIMEOUT)) self.assertIs(exporter._compression, Compression.Gzip) self.assertEqual( exporter._headers, { "envheader1": "val1", "envheader2": "val2", "user-agent": "Overridden", }, ) @patch.dict( "os.environ", {OTEL_EXPORTER_OTLP_ENDPOINT: OS_ENV_ENDPOINT}, ) def test_exporter_env_endpoint_without_slash(self): exporter = OTLPMetricExporter() self.assertEqual( exporter._endpoint, OS_ENV_ENDPOINT + f"/{DEFAULT_METRICS_EXPORT_PATH}", ) @patch.dict( "os.environ", {OTEL_EXPORTER_OTLP_ENDPOINT: OS_ENV_ENDPOINT + "/"}, ) def test_exporter_env_endpoint_with_slash(self): exporter = OTLPMetricExporter() self.assertEqual( exporter._endpoint, OS_ENV_ENDPOINT + f"/{DEFAULT_METRICS_EXPORT_PATH}", ) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_HEADERS: "envHeader1=val1,envHeader2=val2,missingValue" }, ) def test_headers_parse_from_env(self): with self.assertLogs(level="WARNING") as cm: _ = OTLPMetricExporter() self.assertEqual( cm.records[0].message, ( "Header format invalid! Header values in environment " "variables must be URL encoded per the OpenTelemetry " "Protocol Exporter specification or a comma separated " "list of name=value occurrences: missingValue" ), ) @patch.object(Session, "post") def test_success(self, mock_post): resp = Response() resp.status_code = 200 mock_post.return_value = resp exporter = OTLPMetricExporter() self.assertEqual( exporter.export(self.metrics["sum_int"]), MetricExportResult.SUCCESS, ) @patch.object(Session, "post") def test_failure(self, mock_post): resp = Response() resp.status_code = 401 mock_post.return_value = resp exporter = OTLPMetricExporter() self.assertEqual( exporter.export(self.metrics["sum_int"]), MetricExportResult.FAILURE, ) @patch.object(Session, "post") def test_serialization(self, mock_post): resp = Response() resp.status_code = 200 mock_post.return_value = resp exporter = OTLPMetricExporter() self.assertEqual( exporter.export(self.metrics["sum_int"]), MetricExportResult.SUCCESS, ) serialized_data = encode_metrics(self.metrics["sum_int"]) mock_post.assert_called_once_with( url=exporter._endpoint, data=serialized_data.SerializeToString(), verify=exporter._certificate_file, timeout=ANY, # Timeout is a float based on real time, can't put an exact value here. 
cert=exporter._client_cert, ) def test_aggregation_temporality(self): otlp_metric_exporter = OTLPMetricExporter() for ( temporality ) in otlp_metric_exporter._preferred_temporality.values(): self.assertEqual(temporality, AggregationTemporality.CUMULATIVE) with patch.dict( environ, {OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: "CUMULATIVE"}, ): otlp_metric_exporter = OTLPMetricExporter() for ( temporality ) in otlp_metric_exporter._preferred_temporality.values(): self.assertEqual( temporality, AggregationTemporality.CUMULATIVE ) with patch.dict( environ, {OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: "ABC"} ): with self.assertLogs(level=WARNING): otlp_metric_exporter = OTLPMetricExporter() for ( temporality ) in otlp_metric_exporter._preferred_temporality.values(): self.assertEqual( temporality, AggregationTemporality.CUMULATIVE ) with patch.dict( environ, {OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: "DELTA"}, ): otlp_metric_exporter = OTLPMetricExporter() self.assertEqual( otlp_metric_exporter._preferred_temporality[Counter], AggregationTemporality.DELTA, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[UpDownCounter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[Histogram], AggregationTemporality.DELTA, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ObservableCounter], AggregationTemporality.DELTA, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ ObservableUpDownCounter ], AggregationTemporality.CUMULATIVE, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ObservableGauge], AggregationTemporality.CUMULATIVE, ) with patch.dict( environ, {OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: "LOWMEMORY"}, ): otlp_metric_exporter = OTLPMetricExporter() self.assertEqual( otlp_metric_exporter._preferred_temporality[Counter], AggregationTemporality.DELTA, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[UpDownCounter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[Histogram], AggregationTemporality.DELTA, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ObservableCounter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ ObservableUpDownCounter ], AggregationTemporality.CUMULATIVE, ) self.assertEqual( otlp_metric_exporter._preferred_temporality[ObservableGauge], AggregationTemporality.CUMULATIVE, ) def test_exponential_explicit_bucket_histogram(self): self.assertIsInstance( OTLPMetricExporter()._preferred_aggregation[Histogram], ExplicitBucketHistogramAggregation, ) with patch.dict( environ, { OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION: "base2_exponential_bucket_histogram" }, ): self.assertIsInstance( OTLPMetricExporter()._preferred_aggregation[Histogram], ExponentialBucketHistogramAggregation, ) with patch.dict( environ, {OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION: "abc"}, ): with self.assertLogs(level=WARNING) as log: self.assertIsInstance( OTLPMetricExporter()._preferred_aggregation[Histogram], ExplicitBucketHistogramAggregation, ) self.assertIn( ( "Invalid value for OTEL_EXPORTER_OTLP_METRICS_DEFAULT_" "HISTOGRAM_AGGREGATION: abc, using explicit bucket " "histogram aggregation" ), log.output[0], ) with patch.dict( environ, { OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION: "explicit_bucket_histogram" }, ): self.assertIsInstance( OTLPMetricExporter()._preferred_aggregation[Histogram], 
ExplicitBucketHistogramAggregation, ) @patch.object(OTLPMetricExporter, "_export", return_value=Mock(ok=True)) def test_2xx_status_code(self, mock_otlp_metric_exporter): """ Test that any HTTP 2XX code returns a successful result """ self.assertEqual( OTLPMetricExporter().export(MagicMock()), MetricExportResult.SUCCESS, ) def test_preferred_aggregation_override(self): histogram_aggregation = ExplicitBucketHistogramAggregation( boundaries=[0.05, 0.1, 0.5, 1, 5, 10], ) exporter = OTLPMetricExporter( preferred_aggregation={ Histogram: histogram_aggregation, }, ) self.assertEqual( exporter._preferred_aggregation[Histogram], histogram_aggregation ) @patch.object(Session, "post") def test_retry_timeout(self, mock_post): exporter = OTLPMetricExporter(timeout=1.5) resp = Response() resp.status_code = 503 resp.reason = "UNAVAILABLE" mock_post.return_value = resp with self.assertLogs(level=WARNING) as warning: before = time.time() self.assertEqual( exporter.export(self.metrics["sum_int"]), MetricExportResult.FAILURE, ) after = time.time() # First call at time 0, second at time 1, then an early return before the second backoff sleep b/c it would exceed timeout. self.assertEqual(mock_post.call_count, 2) # There's a +/-20% jitter on each backoff. self.assertTrue(0.75 < after - before < 1.25) self.assertIn( "Transient error UNAVAILABLE encountered while exporting metrics batch, retrying in", warning.records[0].message, ) @patch.object(Session, "post") def test_timeout_set_correctly(self, mock_post): resp = Response() resp.status_code = 200 def export_side_effect(*args, **kwargs): # Timeout should be set to something slightly less than 400 milliseconds depending on how much time has passed. self.assertAlmostEqual(0.4, kwargs["timeout"], 2) return resp mock_post.side_effect = export_side_effect exporter = OTLPMetricExporter(timeout=0.4) exporter.export(self.metrics["sum_int"]) @patch.object(Session, "post") def test_shutdown_interrupts_retry_backoff(self, mock_post): exporter = OTLPMetricExporter(timeout=1.5) resp = Response() resp.status_code = 503 resp.reason = "UNAVAILABLE" mock_post.return_value = resp thread = threading.Thread( target=exporter.export, args=(self.metrics["sum_int"],) ) with self.assertLogs(level=WARNING) as warning: before = time.time() thread.start() # Wait for the first attempt to fail, then enter a 1 second backoff. time.sleep(0.05) # Should cause export to wake up and return. exporter.shutdown() thread.join() after = time.time() self.assertIn( "Transient error UNAVAILABLE encountered while exporting metrics batch, retrying in", warning.records[0].message, ) self.assertIn( "Shutdown in progress, aborting retry.", warning.records[1].message, ) assert after - before < 0.2 test_proto_log_exporter.py000066400000000000000000000463211511654350100361630ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# pylint: disable=protected-access import threading import time import unittest from logging import WARNING from typing import List from unittest.mock import MagicMock, Mock, patch import requests from google.protobuf.json_format import MessageToDict from requests import Session from requests.models import Response from opentelemetry._logs import LogRecord, SeverityNumber from opentelemetry.exporter.otlp.proto.http import Compression from opentelemetry.exporter.otlp.proto.http._log_exporter import ( DEFAULT_COMPRESSION, DEFAULT_ENDPOINT, DEFAULT_LOGS_EXPORT_PATH, DEFAULT_TIMEOUT, OTLPLogExporter, ) from opentelemetry.exporter.otlp.proto.http.version import __version__ from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import ( ExportLogsServiceRequest, ) from opentelemetry.sdk._logs import ReadWriteLogRecord from opentelemetry.sdk._logs.export import LogRecordExportResult from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_HEADERS, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT, OTEL_EXPORTER_OTLP_TIMEOUT, ) from opentelemetry.sdk.resources import Resource as SDKResource from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.test.mock_test_classes import IterEntryPoint from opentelemetry.trace import ( NonRecordingSpan, SpanContext, TraceFlags, set_span_in_context, ) ENV_ENDPOINT = "http://localhost.env:8080/" ENV_CERTIFICATE = "/etc/base.crt" ENV_CLIENT_CERTIFICATE = "/etc/client-cert.pem" ENV_CLIENT_KEY = "/etc/client-key.pem" ENV_HEADERS = "envHeader1=val1,envHeader2=val2,User-agent=Overridden" ENV_TIMEOUT = "30" class TestOTLPHTTPLogExporter(unittest.TestCase): def test_constructor_default(self): exporter = OTLPLogExporter() self.assertEqual( exporter._endpoint, DEFAULT_ENDPOINT + DEFAULT_LOGS_EXPORT_PATH ) self.assertEqual(exporter._certificate_file, True) self.assertEqual(exporter._client_certificate_file, None) self.assertEqual(exporter._client_key_file, None) self.assertEqual(exporter._timeout, DEFAULT_TIMEOUT) self.assertIs(exporter._compression, DEFAULT_COMPRESSION) self.assertEqual(exporter._headers, {}) self.assertIsInstance(exporter._session, requests.Session) self.assertIn("User-Agent", exporter._session.headers) self.assertEqual( exporter._session.headers.get("Content-Type"), "application/x-protobuf", ) self.assertEqual( exporter._session.headers.get("User-Agent"), "OTel-OTLP-Exporter-Python/" + __version__, ) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_CERTIFICATE: ENV_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE: ENV_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY: ENV_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION: Compression.Gzip.value, OTEL_EXPORTER_OTLP_ENDPOINT: ENV_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS: ENV_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT: ENV_TIMEOUT, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE: "logs/certificate.env", OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE: "logs/client-cert.pem", OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY: "logs/client-key.pem", OTEL_EXPORTER_OTLP_LOGS_COMPRESSION: Compression.Deflate.value, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT: "https://logs.endpoint.env", 
OTEL_EXPORTER_OTLP_LOGS_HEADERS: "logsEnv1=val1,logsEnv2=val2,logsEnv3===val3==,User-agent=LogsUserAgent", OTEL_EXPORTER_OTLP_LOGS_TIMEOUT: "40", _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER: "credential_provider", }, ) @patch("opentelemetry.exporter.otlp.proto.http._common.entry_points") def test_exporter_logs_env_take_priority(self, mock_entry_points): credential = Session() def f(): return credential mock_entry_points.configure_mock( return_value=[IterEntryPoint("custom_credential", f)] ) exporter = OTLPLogExporter() self.assertEqual(exporter._endpoint, "https://logs.endpoint.env") self.assertEqual(exporter._certificate_file, "logs/certificate.env") self.assertEqual( exporter._client_certificate_file, "logs/client-cert.pem" ) self.assertEqual(exporter._client_key_file, "logs/client-key.pem") self.assertEqual(exporter._timeout, 40) self.assertIs(exporter._compression, Compression.Deflate) self.assertEqual( exporter._headers, { "logsenv1": "val1", "logsenv2": "val2", "logsenv3": "==val3==", "user-agent": "LogsUserAgent", }, ) self.assertIs(exporter._session, credential) self.assertIsInstance(exporter._session, requests.Session) self.assertEqual( exporter._session.headers.get("User-Agent"), "LogsUserAgent", ) self.assertEqual( exporter._session.headers.get("Content-Type"), "application/x-protobuf", ) @patch.dict( "os.environ", { _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER: "provider_without_entry_point", }, ) @patch("opentelemetry.exporter.otlp.proto.http._common.entry_points") def test_exception_raised_when_entrypoint_returns_wrong_type( self, mock_entry_points ): def f(): return 1 mock_entry_points.configure_mock( return_value=[IterEntryPoint("custom_credential", f)] ) with self.assertRaises(RuntimeError): OTLPLogExporter() @patch.dict( "os.environ", { _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER: "provider_without_entry_point", }, ) def test_exception_raised_when_entrypoint_does_not_exist(self): with self.assertRaises(RuntimeError): OTLPLogExporter() @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_CERTIFICATE: ENV_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE: ENV_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY: ENV_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION: Compression.Gzip.value, OTEL_EXPORTER_OTLP_ENDPOINT: ENV_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS: ENV_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT: ENV_TIMEOUT, }, ) def test_exporter_constructor_take_priority(self): sess = MagicMock() exporter = OTLPLogExporter( endpoint="endpoint.local:69/logs", certificate_file="/hello.crt", client_key_file="/client-key.pem", client_certificate_file="/client-cert.pem", headers={"testHeader1": "value1", "testHeader2": "value2"}, timeout=70, compression=Compression.NoCompression, session=sess(), ) self.assertEqual(exporter._endpoint, "endpoint.local:69/logs") self.assertEqual(exporter._certificate_file, "/hello.crt") self.assertEqual(exporter._client_certificate_file, "/client-cert.pem") self.assertEqual(exporter._client_key_file, "/client-key.pem") self.assertEqual(exporter._timeout, 70) self.assertIs(exporter._compression, Compression.NoCompression) self.assertEqual( exporter._headers, {"testHeader1": "value1", "testHeader2": "value2"}, ) self.assertTrue(sess.called) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_CERTIFICATE: ENV_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE: ENV_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY: ENV_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION: Compression.Gzip.value, OTEL_EXPORTER_OTLP_ENDPOINT: ENV_ENDPOINT, 
OTEL_EXPORTER_OTLP_HEADERS: ENV_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT: ENV_TIMEOUT, }, ) def test_exporter_env(self): exporter = OTLPLogExporter() self.assertEqual( exporter._endpoint, ENV_ENDPOINT + DEFAULT_LOGS_EXPORT_PATH ) self.assertEqual(exporter._certificate_file, ENV_CERTIFICATE) self.assertEqual( exporter._client_certificate_file, ENV_CLIENT_CERTIFICATE ) self.assertEqual(exporter._client_key_file, ENV_CLIENT_KEY) self.assertEqual(exporter._timeout, int(ENV_TIMEOUT)) self.assertIs(exporter._compression, Compression.Gzip) self.assertEqual( exporter._headers, { "envheader1": "val1", "envheader2": "val2", "user-agent": "Overridden", }, ) self.assertIsInstance(exporter._session, requests.Session) @staticmethod def export_log_and_deserialize(log): with patch("requests.Session.post") as mock_post: exporter = OTLPLogExporter() exporter.export([log]) request_body = mock_post.call_args[1]["data"] request = ExportLogsServiceRequest() request.ParseFromString(request_body) request_dict = MessageToDict(request) log_records = ( request_dict.get("resourceLogs")[0] .get("scopeLogs")[0] .get("logRecords") ) return log_records def test_exported_log_without_trace_id(self): ctx = set_span_in_context( NonRecordingSpan( SpanContext( 0, 1312458408527513292, False, TraceFlags(0x01), ) ) ) log = ReadWriteLogRecord( LogRecord( timestamp=1644650195189786182, context=ctx, severity_text="WARN", severity_number=SeverityNumber.WARN, body="Invalid trace id check", attributes={"a": 1, "b": "c"}, ), resource=SDKResource({"first_resource": "value"}), instrumentation_scope=InstrumentationScope("name", "version"), ) log_records = TestOTLPHTTPLogExporter.export_log_and_deserialize(log) if log_records: log_record = log_records[0] self.assertIn("spanId", log_record) self.assertNotIn( "traceId", log_record, "trace_id should not be present in the log record", ) else: self.fail("No log records found") def test_exported_log_without_span_id(self): ctx = set_span_in_context( NonRecordingSpan( SpanContext( 89564621134313219400156819398935297696, 0, False, TraceFlags(0x01), ) ) ) log = ReadWriteLogRecord( LogRecord( timestamp=1644650195189786360, context=ctx, severity_text="WARN", severity_number=SeverityNumber.WARN, body="Invalid span id check", attributes={"a": 1, "b": "c"}, ), resource=SDKResource({"first_resource": "value"}), instrumentation_scope=InstrumentationScope("name", "version"), ) log_records = TestOTLPHTTPLogExporter.export_log_and_deserialize(log) if log_records: log_record = log_records[0] self.assertIn("traceId", log_record) self.assertNotIn( "spanId", log_record, "spanId should not be present in the log record", ) else: self.fail("No log records found") @staticmethod def _get_sdk_log_data() -> List[ReadWriteLogRecord]: ctx_log1 = set_span_in_context( NonRecordingSpan( SpanContext( 89564621134313219400156819398935297684, 1312458408527513268, False, TraceFlags(0x01), ) ) ) log1 = ReadWriteLogRecord( LogRecord( timestamp=1644650195189786880, context=ctx_log1, severity_text="WARN", severity_number=SeverityNumber.WARN, body="Do not go gentle into that good night. 
Rage, rage against the dying of the light", attributes={"a": 1, "b": "c"}, ), resource=SDKResource({"first_resource": "value"}), instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), ) ctx_log2 = set_span_in_context( NonRecordingSpan( SpanContext( 0, 0, False, ) ) ) log2 = ReadWriteLogRecord( LogRecord( timestamp=1644650249738562048, context=ctx_log2, severity_text="WARN", severity_number=SeverityNumber.WARN, body="Cooper, this is no time for caution!", attributes={}, ), resource=SDKResource({"second_resource": "CASE"}), instrumentation_scope=InstrumentationScope( "second_name", "second_version" ), ) ctx_log3 = set_span_in_context( NonRecordingSpan( SpanContext( 271615924622795969659406376515024083555, 4242561578944770265, False, TraceFlags(0x01), ) ) ) log3 = ReadWriteLogRecord( LogRecord( timestamp=1644650427658989056, context=ctx_log3, severity_text="DEBUG", severity_number=SeverityNumber.DEBUG, body="To our galaxy", attributes={"a": 1, "b": "c"}, ), resource=SDKResource({"second_resource": "CASE"}), instrumentation_scope=None, ) ctx_log4 = set_span_in_context( NonRecordingSpan( SpanContext( 212592107417388365804938480559624925555, 6077757853989569223, False, TraceFlags(0x01), ) ) ) log4 = ReadWriteLogRecord( LogRecord( timestamp=1644650584292683008, context=ctx_log4, severity_text="INFO", severity_number=SeverityNumber.INFO, body="Love is the one thing that transcends time and space", attributes={"filename": "model.py", "func_name": "run_method"}, ), resource=SDKResource({"first_resource": "value"}), instrumentation_scope=InstrumentationScope( "another_name", "another_version" ), ) return [log1, log2, log3, log4] @patch.object(OTLPLogExporter, "_export", return_value=Mock(ok=True)) def test_2xx_status_code(self, mock_otlp_metric_exporter): """ Test that any HTTP 2XX code returns a successful result """ self.assertEqual( OTLPLogExporter().export(MagicMock()), LogRecordExportResult.SUCCESS, ) @patch.object(Session, "post") def test_retry_timeout(self, mock_post): exporter = OTLPLogExporter(timeout=1.5) resp = Response() resp.status_code = 503 resp.reason = "UNAVAILABLE" mock_post.return_value = resp with self.assertLogs(level=WARNING) as warning: before = time.time() # Set timeout to 1.5 seconds self.assertEqual( exporter.export(self._get_sdk_log_data()), LogRecordExportResult.FAILURE, ) after = time.time() # First call at time 0, second at time 1, then an early return before the second backoff sleep b/c it would exceed timeout. self.assertEqual(mock_post.call_count, 2) # There's a +/-20% jitter on each backoff. self.assertTrue(0.75 < after - before < 1.25) self.assertIn( "Transient error UNAVAILABLE encountered while exporting logs batch, retrying in", warning.records[0].message, ) @patch.object(Session, "post") def test_timeout_set_correctly(self, mock_post): resp = Response() resp.status_code = 200 def export_side_effect(*args, **kwargs): # Timeout should be set to something slightly less than 400 milliseconds depending on how much time has passed. 
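# (That is, the exporter passes its remaining overall deadline, not a fresh per-request timeout, to each Session.post call.)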
self.assertAlmostEqual(0.4, kwargs["timeout"], 2) return resp mock_post.side_effect = export_side_effect exporter = OTLPLogExporter(timeout=0.4) exporter.export(self._get_sdk_log_data()) @patch.object(Session, "post") def test_shutdown_interrupts_retry_backoff(self, mock_post): exporter = OTLPLogExporter(timeout=1.5) resp = Response() resp.status_code = 503 resp.reason = "UNAVAILABLE" mock_post.return_value = resp thread = threading.Thread( target=exporter.export, args=(self._get_sdk_log_data(),) ) with self.assertLogs(level=WARNING) as warning: before = time.time() thread.start() # Wait for the first attempt to fail, then enter a 1 second backoff. time.sleep(0.05) # Should cause export to wake up and return. exporter.shutdown() thread.join() after = time.time() self.assertIn( "Transient error UNAVAILABLE encountered while exporting logs batch, retrying in", warning.records[0].message, ) self.assertIn( "Shutdown in progress, aborting retry.", warning.records[1].message, ) assert after - before < 0.2 test_proto_span_exporter.py000066400000000000000000000321171511654350100363410ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp-proto-http/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
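# The tests below cover the OTLP proto-HTTP span exporter: constructor defaults, environment-variable precedence, endpoint path handling, header parsing, and retry/backoff and shutdown behavior.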
import threading import time import unittest from logging import WARNING from unittest.mock import MagicMock, Mock, patch import requests from requests import Session from requests.models import Response from opentelemetry.exporter.otlp.proto.http import Compression from opentelemetry.exporter.otlp.proto.http.trace_exporter import ( DEFAULT_COMPRESSION, DEFAULT_ENDPOINT, DEFAULT_TIMEOUT, DEFAULT_TRACES_EXPORT_PATH, OTLPSpanExporter, ) from opentelemetry.exporter.otlp.proto.http.version import __version__ from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER, OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_HEADERS, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, ) from opentelemetry.sdk.trace import _Span from opentelemetry.sdk.trace.export import SpanExportResult from opentelemetry.test.mock_test_classes import IterEntryPoint OS_ENV_ENDPOINT = "os.env.base" OS_ENV_CERTIFICATE = "os/env/base.crt" OS_ENV_CLIENT_CERTIFICATE = "os/env/client-cert.pem" OS_ENV_CLIENT_KEY = "os/env/client-key.pem" OS_ENV_HEADERS = "envHeader1=val1,envHeader2=val2,User-agent=Overridden" OS_ENV_TIMEOUT = "30" BASIC_SPAN = _Span( "abc", context=Mock( **{ "trace_state": {"a": "b", "c": "d"}, "span_id": 10217189687419569865, "trace_id": 67545097771067222548457157018666467027, } ), ) # pylint: disable=protected-access class TestOTLPSpanExporter(unittest.TestCase): def test_constructor_default(self): exporter = OTLPSpanExporter() self.assertEqual( exporter._endpoint, DEFAULT_ENDPOINT + DEFAULT_TRACES_EXPORT_PATH ) self.assertEqual(exporter._certificate_file, True) self.assertEqual(exporter._client_certificate_file, None) self.assertEqual(exporter._client_key_file, None) self.assertEqual(exporter._timeout, DEFAULT_TIMEOUT) self.assertIs(exporter._compression, DEFAULT_COMPRESSION) self.assertEqual(exporter._headers, {}) self.assertIsInstance(exporter._session, requests.Session) self.assertIn("User-Agent", exporter._session.headers) self.assertEqual( exporter._session.headers.get("Content-Type"), "application/x-protobuf", ) self.assertEqual( exporter._session.headers.get("User-Agent"), "OTel-OTLP-Exporter-Python/" + __version__, ) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_CERTIFICATE: OS_ENV_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE: OS_ENV_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY: OS_ENV_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION: Compression.Gzip.value, OTEL_EXPORTER_OTLP_ENDPOINT: OS_ENV_ENDPOINT, OTEL_EXPORTER_OTLP_HEADERS: OS_ENV_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT: OS_ENV_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE: "traces/certificate.env", OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE: "traces/client-cert.pem", OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY: "traces/client-key.pem", OTEL_EXPORTER_OTLP_TRACES_COMPRESSION: Compression.Deflate.value, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "https://traces.endpoint.env", OTEL_EXPORTER_OTLP_TRACES_HEADERS: "tracesEnv1=val1,tracesEnv2=val2,traceEnv3===val3==,User-agent=TraceUserAgent", OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: "40", _OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER: "credential_provider", }, ) 
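# Signal-specific OTEL_EXPORTER_OTLP_TRACES_* variables (patched above) are expected to take precedence over the generic OTEL_EXPORTER_OTLP_* variables in the test below.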
@patch("opentelemetry.exporter.otlp.proto.http._common.entry_points") def test_exporter_traces_env_take_priority(self, mock_entry_point): credential = Session() def f(): return credential mock_entry_point.configure_mock( return_value=[IterEntryPoint("custom_credential", f)] ) exporter = OTLPSpanExporter() self.assertEqual(exporter._endpoint, "https://traces.endpoint.env") self.assertEqual(exporter._certificate_file, "traces/certificate.env") self.assertEqual( exporter._client_certificate_file, "traces/client-cert.pem" ) self.assertEqual(exporter._client_key_file, "traces/client-key.pem") self.assertEqual(exporter._timeout, 40) self.assertIs(exporter._compression, Compression.Deflate) self.assertEqual( exporter._headers, { "tracesenv1": "val1", "tracesenv2": "val2", "traceenv3": "==val3==", "user-agent": "TraceUserAgent", }, ) self.assertIs(exporter._session, credential) self.assertIsInstance(exporter._session, requests.Session) self.assertEqual( exporter._session.headers.get("Content-Type"), "application/x-protobuf", ) self.assertEqual( exporter._session.headers.get("User-Agent"), "TraceUserAgent", ) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_CERTIFICATE: OS_ENV_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE: OS_ENV_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY: OS_ENV_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION: Compression.Gzip.value, OTEL_EXPORTER_OTLP_ENDPOINT: OS_ENV_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "https://traces.endpoint.env", OTEL_EXPORTER_OTLP_HEADERS: OS_ENV_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT: OS_ENV_TIMEOUT, }, ) def test_exporter_constructor_take_priority(self): exporter = OTLPSpanExporter( endpoint="example.com/1234", certificate_file="path/to/service.crt", client_key_file="path/to/client-key.pem", client_certificate_file="path/to/client-cert.pem", headers={"testHeader1": "value1", "testHeader2": "value2"}, timeout=20, compression=Compression.NoCompression, session=requests.Session(), ) self.assertEqual(exporter._endpoint, "example.com/1234") self.assertEqual(exporter._certificate_file, "path/to/service.crt") self.assertEqual( exporter._client_certificate_file, "path/to/client-cert.pem" ) self.assertEqual(exporter._client_key_file, "path/to/client-key.pem") self.assertEqual(exporter._timeout, 20) self.assertIs(exporter._compression, Compression.NoCompression) self.assertEqual( exporter._headers, {"testHeader1": "value1", "testHeader2": "value2"}, ) self.assertIsInstance(exporter._session, requests.Session) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_CERTIFICATE: OS_ENV_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE: OS_ENV_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_CLIENT_KEY: OS_ENV_CLIENT_KEY, OTEL_EXPORTER_OTLP_COMPRESSION: Compression.Gzip.value, OTEL_EXPORTER_OTLP_HEADERS: OS_ENV_HEADERS, OTEL_EXPORTER_OTLP_TIMEOUT: OS_ENV_TIMEOUT, }, ) def test_exporter_env(self): exporter = OTLPSpanExporter() self.assertEqual(exporter._certificate_file, OS_ENV_CERTIFICATE) self.assertEqual( exporter._client_certificate_file, OS_ENV_CLIENT_CERTIFICATE ) self.assertEqual(exporter._client_key_file, OS_ENV_CLIENT_KEY) self.assertEqual(exporter._timeout, int(OS_ENV_TIMEOUT)) self.assertIs(exporter._compression, Compression.Gzip) self.assertEqual( exporter._headers, { "envheader1": "val1", "envheader2": "val2", "user-agent": "Overridden", }, ) @patch.dict( "os.environ", {OTEL_EXPORTER_OTLP_ENDPOINT: OS_ENV_ENDPOINT}, ) def test_exporter_env_endpoint_without_slash(self): exporter = OTLPSpanExporter() self.assertEqual( exporter._endpoint, OS_ENV_ENDPOINT + 
f"/{DEFAULT_TRACES_EXPORT_PATH}", ) @patch.dict( "os.environ", {OTEL_EXPORTER_OTLP_ENDPOINT: OS_ENV_ENDPOINT + "/"}, ) def test_exporter_env_endpoint_with_slash(self): exporter = OTLPSpanExporter() self.assertEqual( exporter._endpoint, OS_ENV_ENDPOINT + f"/{DEFAULT_TRACES_EXPORT_PATH}", ) @patch.dict( "os.environ", { OTEL_EXPORTER_OTLP_HEADERS: "envHeader1=val1,envHeader2=val2,missingValue" }, ) def test_headers_parse_from_env(self): with self.assertLogs(level="WARNING") as cm: _ = OTLPSpanExporter() self.assertEqual( cm.records[0].message, ( "Header format invalid! Header values in environment " "variables must be URL encoded per the OpenTelemetry " "Protocol Exporter specification or a comma separated " "list of name=value occurrences: missingValue" ), ) @patch.object(OTLPSpanExporter, "_export", return_value=Mock(ok=True)) def test_2xx_status_code(self, mock_otlp_metric_exporter): """ Test that any HTTP 2XX code returns a successful result """ self.assertEqual( OTLPSpanExporter().export(MagicMock()), SpanExportResult.SUCCESS ) @patch.object(Session, "post") def test_retry_timeout(self, mock_post): exporter = OTLPSpanExporter(timeout=1.5) resp = Response() resp.status_code = 503 resp.reason = "UNAVAILABLE" mock_post.return_value = resp with self.assertLogs(level=WARNING) as warning: before = time.time() # Set timeout to 1.5 seconds self.assertEqual( exporter.export([BASIC_SPAN]), SpanExportResult.FAILURE, ) after = time.time() # First call at time 0, second at time 1, then an early return before the second backoff sleep b/c it would exceed timeout. self.assertEqual(mock_post.call_count, 2) # There's a +/-20% jitter on each backoff. self.assertTrue(0.75 < after - before < 1.25) self.assertIn( "Transient error UNAVAILABLE encountered while exporting span batch, retrying in", warning.records[0].message, ) @patch.object(Session, "post") def test_timeout_set_correctly(self, mock_post): resp = Response() resp.status_code = 200 def export_side_effect(*args, **kwargs): # Timeout should be set to something slightly less than 400 milliseconds depending on how much time has passed. self.assertAlmostEqual(0.4, kwargs["timeout"], 2) return resp mock_post.side_effect = export_side_effect exporter = OTLPSpanExporter(timeout=0.4) exporter.export([BASIC_SPAN]) @patch.object(Session, "post") def test_shutdown_interrupts_retry_backoff(self, mock_post): exporter = OTLPSpanExporter(timeout=1.5) resp = Response() resp.status_code = 503 resp.reason = "UNAVAILABLE" mock_post.return_value = resp thread = threading.Thread(target=exporter.export, args=([BASIC_SPAN],)) with self.assertLogs(level=WARNING) as warning: before = time.time() thread.start() # Wait for the first attempt to fail, then enter a 1 second backoff. time.sleep(0.05) # Should cause export to wake up and return. exporter.shutdown() thread.join() after = time.time() self.assertIn( "Transient error UNAVAILABLE encountered while exporting span batch, retrying in", warning.records[0].message, ) self.assertIn( "Shutdown in progress, aborting retry.", warning.records[1].message, ) assert after - before < 0.2 python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/000077500000000000000000000000001511654350100254075ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/LICENSE000066400000000000000000000261351511654350100264230ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/README.rst000066400000000000000000000022121511654350100270730ustar00rootroot00000000000000OpenTelemetry Collector Exporters ================================= |pypi| .. 
|pypi| image:: https://badge.fury.io/py/opentelemetry-exporter-otlp.svg :target: https://pypi.org/project/opentelemetry-exporter-otlp/ This library is provided as a convenience to install all supported OpenTelemetry Collector Exporters. Currently it installs: * opentelemetry-exporter-otlp-proto-grpc * opentelemetry-exporter-otlp-proto-http In the future, additional packages will be available: * opentelemetry-exporter-otlp-json-http To avoid unnecessary dependencies, users should install the specific package once they've determined their preferred serialization and protocol method. Installation ------------ :: pip install opentelemetry-exporter-otlp References ---------- * `OpenTelemetry Collector Exporter `_ * `OpenTelemetry Collector `_ * `OpenTelemetry `_ * `OpenTelemetry Protocol Specification `_ python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/pyproject.toml000066400000000000000000000034361511654350100303310ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-exporter-otlp" dynamic = ["version"] description = "OpenTelemetry Collector Exporters" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Framework :: OpenTelemetry :: Exporters", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "opentelemetry-exporter-otlp-proto-grpc == 1.39.1", "opentelemetry-exporter-otlp-proto-http == 1.39.1", ] [project.entry-points.opentelemetry_logs_exporter] otlp = "opentelemetry.exporter.otlp.proto.grpc._log_exporter:OTLPLogExporter" [project.entry-points.opentelemetry_metrics_exporter] otlp = "opentelemetry.exporter.otlp.proto.grpc.metric_exporter:OTLPMetricExporter" [project.entry-points.opentelemetry_traces_exporter] otlp = "opentelemetry.exporter.otlp.proto.grpc.trace_exporter:OTLPSpanExporter" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-otlp" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/exporter/otlp/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] 
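As an illustration of the ``opentelemetry_traces_exporter`` entry point declared above, here is a minimal sketch (not code from this package) of wiring the gRPC ``OTLPSpanExporter``, the class that entry point resolves to, into the SDK by hand. It assumes a collector listening on the gRPC exporter's default ``localhost:4317`` endpoint.

.. code:: python

    from opentelemetry import trace
    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
        OTLPSpanExporter,
    )
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor

    # Batch spans and ship them to a collector over gRPC.
    provider = TracerProvider()
    provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
    trace.set_tracer_provider(provider)

    with trace.get_tracer(__name__).start_as_current_span("example-span"):
        pass  # work done here is recorded on the span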
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/src/000077500000000000000000000000001511654350100261765ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/src/opentelemetry/000077500000000000000000000000001511654350100310725ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/000077500000000000000000000000001511654350100327425ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/000077500000000000000000000000001511654350100337205ustar00rootroot00000000000000py.typed000066400000000000000000000000001511654350100353260ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlpversion/000077500000000000000000000000001511654350100353265ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp__init__.py000066400000000000000000000011401511654350100374330ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/src/opentelemetry/exporter/otlp/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "1.39.1" python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/test-requirements.txt000066400000000000000000000010071511654350100316460ustar00rootroot00000000000000asgiref==3.7.2 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e tests/opentelemetry-test-utils -e exporter/opentelemetry-exporter-otlp-proto-common -e exporter/opentelemetry-exporter-otlp-proto-grpc -e exporter/opentelemetry-exporter-otlp-proto-http -e opentelemetry-proto -e opentelemetry-sdk -e opentelemetry-semantic-conventions -e exporter/opentelemetry-exporter-otlp python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/tests/000077500000000000000000000000001511654350100265515ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/tests/__init__.py000066400000000000000000000000001511654350100306500ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-otlp/tests/test_otlp.py000066400000000000000000000024501511654350100311410ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.exporter.otlp.proto.grpc._log_exporter import ( OTLPLogExporter, ) from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import ( OTLPMetricExporter, ) from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.exporter.otlp.proto.http.trace_exporter import ( OTLPSpanExporter as HTTPSpanExporter, ) from opentelemetry.test import TestCase class TestOTLPExporters(TestCase): def test_constructors(self): for exporter in [ OTLPSpanExporter, HTTPSpanExporter, OTLPLogExporter, OTLPMetricExporter, ]: with self.assertNotRaises(Exception): exporter() python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/000077500000000000000000000000001511654350100266245ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/LICENSE000066400000000000000000000261351511654350100276400ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/README.rst000066400000000000000000000015531511654350100303170ustar00rootroot00000000000000OpenTelemetry Prometheus Exporter ================================= |pypi| .. |pypi| image:: https://badge.fury.io/py/opentelemetry-exporter-prometheus.svg :target: https://pypi.org/project/opentelemetry-exporter-prometheus/ This library allows export of metrics data to `Prometheus `_. Installation ------------ :: pip install opentelemetry-exporter-prometheus Limitations ----------- * No multiprocessing support: The Prometheus exporter is not designed to operate in multiprocessing environments (see `#3747 `_). References ---------- * `OpenTelemetry Prometheus Exporter `_ * `Prometheus `_ * `OpenTelemetry Project `_ python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/pyproject.toml000066400000000000000000000031231511654350100315370ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-exporter-prometheus" dynamic = ["version"] description = "Prometheus Metric Exporter for OpenTelemetry" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 4 - Beta", "Framework :: OpenTelemetry", "Framework :: OpenTelemetry :: Exporters", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ] dependencies = [ "opentelemetry-api ~= 1.12", # DONOTMERGE: confirm that this will become ~= 1.21 in the next release "opentelemetry-sdk ~= 1.39.1", "prometheus_client >= 0.5.0, < 1.0.0", ] [project.entry-points.opentelemetry_metrics_exporter] prometheus = "opentelemetry.exporter.prometheus:_AutoPrometheusMetricReader" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-prometheus" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/exporter/prometheus/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"]
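A usage note on the ``opentelemetry_metrics_exporter`` entry point declared above. Under SDK auto-configuration the reader is typically selected by name rather than constructed in code; the sketch below is an assumption about that flow, not code from this package (the environment variable names are the real ones this exporter imports from the SDK; the values are illustrative).

.. code:: python

    import os

    # "prometheus" is resolved through the opentelemetry_metrics_exporter
    # entry-point group, which maps it to _AutoPrometheusMetricReader here.
    os.environ["OTEL_METRICS_EXPORTER"] = "prometheus"

    # Where the scrape endpoint listens; the values below are illustrative.
    os.environ["OTEL_EXPORTER_PROMETHEUS_HOST"] = "localhost"
    os.environ["OTEL_EXPORTER_PROMETHEUS_PORT"] = "9464"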
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/src/000077500000000000000000000000001511654350100274135ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/000077500000000000000000000000001511654350100323075ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/000077500000000000000000000000001511654350100341575ustar00rootroot00000000000000prometheus/000077500000000000000000000000001511654350100362735ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter__init__.py000066400000000000000000000347031511654350100404130ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This library allows export of metrics data to `Prometheus `_. Usage ----- The **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_ metrics to `Prometheus`_. .. _Prometheus: https://prometheus.io/ .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ .. code:: python from prometheus_client import start_http_server from opentelemetry.exporter.prometheus import PrometheusMetricReader from opentelemetry.metrics import get_meter_provider, set_meter_provider from opentelemetry.sdk.metrics import MeterProvider # Start Prometheus client start_http_server(port=8000, addr="localhost") # Exporter to export metrics to Prometheus (the reader takes no # metric-name prefix argument; see its constructor below) reader = PrometheusMetricReader() # Meter is responsible for creating and recording metrics set_meter_provider(MeterProvider(metric_readers=[reader])) meter = get_meter_provider().get_meter("myapp", "0.1.2") counter = meter.create_counter( "requests", "requests", "number of requests", ) # Labels are used to identify key-values that are associated with a specific # metric that you want to record.
These are useful for pre-aggregation and can # be used to store custom dimensions pertaining to a metric labels = {"environment": "staging"} counter.add(25, labels) input("Press any key to exit...") API --- """ from collections import deque from itertools import chain from json import dumps from logging import getLogger from os import environ from typing import Deque, Dict, Iterable, Sequence, Tuple, Union from prometheus_client import start_http_server from prometheus_client.core import ( REGISTRY, CounterMetricFamily, GaugeMetricFamily, HistogramMetricFamily, InfoMetricFamily, ) from prometheus_client.core import Metric as PrometheusMetric from opentelemetry.exporter.prometheus._mapping import ( map_unit, sanitize_attribute, sanitize_full_name, ) from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_PROMETHEUS_HOST, OTEL_EXPORTER_PROMETHEUS_PORT, ) from opentelemetry.sdk.metrics import ( Counter, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, ) from opentelemetry.sdk.metrics import Histogram as HistogramInstrument from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Gauge, Histogram, HistogramDataPoint, MetricReader, MetricsData, Sum, ) from opentelemetry.util.types import Attributes _logger = getLogger(__name__) _TARGET_INFO_NAME = "target" _TARGET_INFO_DESCRIPTION = "Target metadata" def _convert_buckets( bucket_counts: Sequence[int], explicit_bounds: Sequence[float] ) -> Sequence[Tuple[str, int]]: buckets = [] total_count = 0 for upper_bound, count in zip( chain(explicit_bounds, ["+Inf"]), bucket_counts, ): total_count += count buckets.append((f"{upper_bound}", total_count)) return buckets class PrometheusMetricReader(MetricReader): """Prometheus metric exporter for OpenTelemetry.""" def __init__(self, disable_target_info: bool = False) -> None: super().__init__( preferred_temporality={ Counter: AggregationTemporality.CUMULATIVE, UpDownCounter: AggregationTemporality.CUMULATIVE, HistogramInstrument: AggregationTemporality.CUMULATIVE, ObservableCounter: AggregationTemporality.CUMULATIVE, ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, ObservableGauge: AggregationTemporality.CUMULATIVE, } ) self._collector = _CustomCollector(disable_target_info) REGISTRY.register(self._collector) self._collector._callback = self.collect def _receive_metrics( self, metrics_data: MetricsData, timeout_millis: float = 10_000, **kwargs, ) -> None: if metrics_data is None: return self._collector.add_metrics_data(metrics_data) def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: REGISTRY.unregister(self._collector) class _CustomCollector: """_CustomCollector represents the Prometheus Collector object See more: https://github.com/prometheus/client_python#custom-collectors """ def __init__(self, disable_target_info: bool = False): self._callback = None self._metrics_datas: Deque[MetricsData] = deque() self._disable_target_info = disable_target_info self._target_info = None def add_metrics_data(self, metrics_data: MetricsData) -> None: """Add metrics to Prometheus data""" self._metrics_datas.append(metrics_data) def collect(self) -> Iterable[PrometheusMetric]: """Collect fetches the metrics from OpenTelemetry and delivers them as Prometheus Metrics. Collect is invoked every time a ``prometheus.Gatherer`` is run for example when the HTTP endpoint is invoked by Prometheus. 
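Each call drains the ``MetricsData`` objects previously queued via ``add_metrics_data``; data points that share the same name, description, sorted label keys and unit are merged into a single metric family before being yielded.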
""" if self._callback is not None: self._callback() metric_family_id_metric_family = {} if len(self._metrics_datas): if not self._disable_target_info: if self._target_info is None: attributes: Attributes = {} for res in self._metrics_datas[0].resource_metrics: attributes = {**attributes, **res.resource.attributes} self._target_info = self._create_info_metric( _TARGET_INFO_NAME, _TARGET_INFO_DESCRIPTION, attributes ) metric_family_id_metric_family[_TARGET_INFO_NAME] = ( self._target_info ) while self._metrics_datas: self._translate_to_prometheus( self._metrics_datas.popleft(), metric_family_id_metric_family ) if metric_family_id_metric_family: yield from metric_family_id_metric_family.values() # pylint: disable=too-many-locals,too-many-branches def _translate_to_prometheus( self, metrics_data: MetricsData, metric_family_id_metric_family: Dict[str, PrometheusMetric], ): metrics = [] for resource_metrics in metrics_data.resource_metrics: for scope_metrics in resource_metrics.scope_metrics: for metric in scope_metrics.metrics: metrics.append(metric) for metric in metrics: label_values_data_points = [] label_keys_data_points = [] values = [] per_metric_family_ids = [] metric_name = sanitize_full_name(metric.name) metric_description = metric.description or "" metric_unit = map_unit(metric.unit) for number_data_point in metric.data.data_points: label_keys = [] label_values = [] for key, value in sorted(number_data_point.attributes.items()): label_keys.append(sanitize_attribute(key)) label_values.append(self._check_value(value)) per_metric_family_ids.append( "|".join( [ metric_name, metric_description, "%".join(label_keys), metric_unit, ] ) ) label_values_data_points.append(label_values) label_keys_data_points.append(label_keys) if isinstance(number_data_point, HistogramDataPoint): values.append( { "bucket_counts": number_data_point.bucket_counts, "explicit_bounds": ( number_data_point.explicit_bounds ), "sum": number_data_point.sum, } ) else: values.append(number_data_point.value) for per_metric_family_id, label_keys, label_values, value in zip( per_metric_family_ids, label_keys_data_points, label_values_data_points, values, ): is_non_monotonic_sum = ( isinstance(metric.data, Sum) and metric.data.is_monotonic is False ) is_cumulative = ( isinstance(metric.data, Sum) and metric.data.aggregation_temporality == AggregationTemporality.CUMULATIVE ) # The prometheus compatibility spec for sums says: If the aggregation temporality is cumulative and the sum is non-monotonic, it MUST be converted to a Prometheus Gauge. 
should_convert_sum_to_gauge = ( is_non_monotonic_sum and is_cumulative ) if ( isinstance(metric.data, Sum) and not should_convert_sum_to_gauge ): metric_family_id = "|".join( [per_metric_family_id, CounterMetricFamily.__name__] ) if metric_family_id not in metric_family_id_metric_family: metric_family_id_metric_family[metric_family_id] = ( CounterMetricFamily( name=metric_name, documentation=metric_description, labels=label_keys, unit=metric_unit, ) ) metric_family_id_metric_family[ metric_family_id ].add_metric(labels=label_values, value=value) elif ( isinstance(metric.data, Gauge) or should_convert_sum_to_gauge ): metric_family_id = "|".join( [per_metric_family_id, GaugeMetricFamily.__name__] ) if ( metric_family_id not in metric_family_id_metric_family ): metric_family_id_metric_family[metric_family_id] = ( GaugeMetricFamily( name=metric_name, documentation=metric_description, labels=label_keys, unit=metric_unit, ) ) metric_family_id_metric_family[ metric_family_id ].add_metric(labels=label_values, value=value) elif isinstance(metric.data, Histogram): metric_family_id = "|".join( [per_metric_family_id, HistogramMetricFamily.__name__] ) if ( metric_family_id not in metric_family_id_metric_family ): metric_family_id_metric_family[metric_family_id] = ( HistogramMetricFamily( name=metric_name, documentation=metric_description, labels=label_keys, unit=metric_unit, ) ) metric_family_id_metric_family[ metric_family_id ].add_metric( labels=label_values, buckets=_convert_buckets( value["bucket_counts"], value["explicit_bounds"] ), sum_value=value["sum"], ) else: _logger.warning( "Unsupported metric data. %s", type(metric.data) ) # pylint: disable=no-self-use def _check_value(self, value: Union[int, float, str, Sequence]) -> str: """Check the label value and return its appropriate string representation""" if not isinstance(value, str): return dumps(value, default=str) return str(value) def _create_info_metric( self, name: str, description: str, attributes: Dict[str, str] ) -> InfoMetricFamily: """Create an Info Metric Family with a list of attributes""" # sanitize the attribute names according to Prometheus rules attributes = { sanitize_attribute(key): self._check_value(value) for key, value in attributes.items() } info = InfoMetricFamily(name, description, labels=attributes) info.add_metric(labels=list(attributes.keys()), value=attributes) return info class _AutoPrometheusMetricReader(PrometheusMetricReader): """Thin wrapper around PrometheusMetricReader used for the opentelemetry_metrics_exporter entry point. This allows users to use the prometheus exporter with opentelemetry-instrument. It handles starting the Prometheus HTTP server on the correct port and host. """ def __init__(self) -> None: super().__init__() # Default values are specified in # https://github.com/open-telemetry/opentelemetry-specification/blob/v1.24.0/specification/configuration/sdk-environment-variables.md#prometheus-exporter start_http_server( port=int(environ.get(OTEL_EXPORTER_PROMETHEUS_PORT, "9464")), addr=environ.get(OTEL_EXPORTER_PROMETHEUS_HOST, "localhost"), )
_mapping.py000066400000000000000000000117071511654350100404450ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from re import UNICODE, compile _SANITIZE_NAME_RE = compile(r"[^a-zA-Z0-9:]+", UNICODE) # Same as name, but doesn't allow ":" _SANITIZE_ATTRIBUTE_KEY_RE = compile(r"[^a-zA-Z0-9]+", UNICODE) # UCUM style annotations which are text enclosed in curly braces https://ucum.org/ucum#para-6. # This regex is more permissive than UCUM allows and matches any character within curly braces. _UNIT_ANNOTATION = compile(r"{.*}") # Remaps common UCUM and SI units to prometheus conventions. Copied from # https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.101.0/pkg/translator/prometheus/normalize_name.go#L19 # See specification: # https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#metric-metadata-1 _UNIT_MAPPINGS = { # Time "d": "days", "h": "hours", "min": "minutes", "s": "seconds", "ms": "milliseconds", "us": "microseconds", "ns": "nanoseconds", # Bytes "By": "bytes", "KiBy": "kibibytes", "MiBy": "mebibytes", "GiBy": "gibibytes", "TiBy": "tibibytes", "KBy": "kilobytes", "MBy": "megabytes", "GBy": "gigabytes", "TBy": "terabytes", # SI "m": "meters", "V": "volts", "A": "amperes", "J": "joules", "W": "watts", "g": "grams", # Misc "Cel": "celsius", "Hz": "hertz", # TODO(https://github.com/open-telemetry/opentelemetry-specification/issues/4058): the # specification says to normalize "1" to ratio but that may change. Update this mapping or # remove TODO once a decision is made. "1": "", "%": "percent", } # Similar to _UNIT_MAPPINGS, but for "per" unit denominator. # Example: s => per second (singular) # Copied from https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/80317ce83ed87a2dff0c316bb939afbfaa823d5e/pkg/translator/prometheus/normalize_name.go#L58 _PER_UNIT_MAPPINGS = { "s": "second", "m": "minute", "h": "hour", "d": "day", "w": "week", "mo": "month", "y": "year", } def sanitize_full_name(name: str) -> str: """sanitize the given metric name according to Prometheus rule, including sanitizing leading digits https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#metric-metadata-1 """ # Leading number special case if name and name[0].isdigit(): name = "_" + name[1:] return _sanitize_name(name) def _sanitize_name(name: str) -> str: """sanitize the given metric name according to Prometheus rule, but does not handle sanitizing a leading digit.""" return _SANITIZE_NAME_RE.sub("_", name) def sanitize_attribute(key: str) -> str: """sanitize the given metric attribute key according to Prometheus rule. 
https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#metric-attributes """ # Leading number special case if key and key[0].isdigit(): key = "_" + key[1:] return _SANITIZE_ATTRIBUTE_KEY_RE.sub("_", key) def map_unit(unit: str) -> str: """Maps unit to common prometheus metric names if available and sanitizes any invalid characters See: - https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#metric-metadata-1 - https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.101.0/pkg/translator/prometheus/normalize_name.go#L108 """ # remove curly brace unit annotations unit = _UNIT_ANNOTATION.sub("", unit) if unit in _UNIT_MAPPINGS: return _UNIT_MAPPINGS[unit] # replace "/" with "per" units like m/s -> meters_per_second ratio_unit_subparts = unit.split("/", maxsplit=1) if len(ratio_unit_subparts) == 2: bottom = _sanitize_name(ratio_unit_subparts[1]) if bottom: top = _sanitize_name(ratio_unit_subparts[0]) top = _UNIT_MAPPINGS.get(top, top) bottom = _PER_UNIT_MAPPINGS.get(bottom, bottom) return f"{top}_per_{bottom}" if top else f"per_{bottom}" return ( # since units end up as a metric name suffix, they must be sanitized _sanitize_name(unit) # strip surrounding "_" chars since it will lead to consecutive underscores in the # metric name .strip("_") ) py.typed000066400000000000000000000000001511654350100377600ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheusversion/000077500000000000000000000000001511654350100377605ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus__init__.py000066400000000000000000000011401511654350100420650ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
__version__ = "0.60b1" python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/test-requirements.txt000066400000000000000000000005651511654350100330730ustar00rootroot00000000000000asgiref==3.7.2 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 prometheus_client==0.20.0 py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e opentelemetry-sdk -e tests/opentelemetry-test-utils -e opentelemetry-semantic-conventions -e exporter/opentelemetry-exporter-prometheus python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/tests/000077500000000000000000000000001511654350100277665ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/tests/__init__.py000066400000000000000000000011101511654350100320700ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/tests/test_entrypoints.py000066400000000000000000000050571511654350100340040ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# pylint: disable=no-self-use import os from unittest import TestCase from unittest.mock import ANY, Mock, patch from opentelemetry.exporter.prometheus import _AutoPrometheusMetricReader from opentelemetry.sdk._configuration import _import_exporters from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_PROMETHEUS_HOST, OTEL_EXPORTER_PROMETHEUS_PORT, ) class TestEntrypoints(TestCase): def test_import_exporters(self) -> None: """ Tests that the entrypoint can be loaded and doesn't have a typo in the name """ ( _trace_exporters, metric_exporters, _logs_exporters, ) = _import_exporters( trace_exporter_names=[], metric_exporter_names=["prometheus"], log_exporter_names=[], ) self.assertIs( metric_exporters["prometheus"], _AutoPrometheusMetricReader, ) @patch("opentelemetry.exporter.prometheus.start_http_server") @patch.dict(os.environ) def test_starts_http_server_defaults( self, mock_start_http_server: Mock ) -> None: _AutoPrometheusMetricReader() mock_start_http_server.assert_called_once_with( port=9464, addr="localhost" ) @patch("opentelemetry.exporter.prometheus.start_http_server") @patch.dict(os.environ, {OTEL_EXPORTER_PROMETHEUS_HOST: "1.2.3.4"}) def test_starts_http_server_host_envvar( self, mock_start_http_server: Mock ) -> None: _AutoPrometheusMetricReader() mock_start_http_server.assert_called_once_with( port=ANY, addr="1.2.3.4" ) @patch("opentelemetry.exporter.prometheus.start_http_server") @patch.dict(os.environ, {OTEL_EXPORTER_PROMETHEUS_PORT: "9999"}) def test_starts_http_server_port_envvar( self, mock_start_http_server: Mock ) -> None: _AutoPrometheusMetricReader() mock_start_http_server.assert_called_once_with(port=9999, addr=ANY) python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/tests/test_mapping.py000066400000000000000000000111121511654350100330260ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
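# Tests for the _mapping helpers: full metric-name sanitization, attribute-key
# sanitization, and UCUM-to-Prometheus unit mapping (including "per" units and
# annotation stripping).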
from unittest import TestCase from opentelemetry.exporter.prometheus._mapping import ( map_unit, sanitize_attribute, sanitize_full_name, ) class TestMapping(TestCase): def test_sanitize_full_name(self): self.assertEqual( sanitize_full_name("valid_metric_name"), "valid_metric_name" ) self.assertEqual( sanitize_full_name("VALID_METRIC_NAME"), "VALID_METRIC_NAME" ) self.assertEqual( sanitize_full_name("_valid_metric_name"), "_valid_metric_name" ) self.assertEqual( sanitize_full_name("valid:metric_name"), "valid:metric_name" ) self.assertEqual( sanitize_full_name("valid_1_metric_name"), "valid_1_metric_name" ) self.assertEqual( sanitize_full_name("1leading_digit"), "_leading_digit" ) self.assertEqual( sanitize_full_name("consecutive_____underscores"), "consecutive_underscores", ) self.assertEqual( sanitize_full_name("1_~#consecutive_underscores"), "_consecutive_underscores", ) self.assertEqual( sanitize_full_name("1!2@3#4$5%6^7&8*9(0)_-"), "_2_3_4_5_6_7_8_9_0_", ) self.assertEqual(sanitize_full_name("foo,./?;:[]{}bar"), "foo_:_bar") self.assertEqual(sanitize_full_name("TestString"), "TestString") self.assertEqual(sanitize_full_name("aAbBcC_12_oi"), "aAbBcC_12_oi") def test_sanitize_attribute(self): self.assertEqual( sanitize_attribute("valid_attr_key"), "valid_attr_key" ) self.assertEqual( sanitize_attribute("VALID_attr_key"), "VALID_attr_key" ) self.assertEqual( sanitize_attribute("_valid_attr_key"), "_valid_attr_key" ) self.assertEqual( sanitize_attribute("valid_1_attr_key"), "valid_1_attr_key" ) self.assertEqual( sanitize_attribute("sanitize:colons"), "sanitize_colons" ) self.assertEqual( sanitize_attribute("1leading_digit"), "_leading_digit" ) self.assertEqual( sanitize_attribute("1_~#consecutive_underscores"), "_consecutive_underscores", ) self.assertEqual( sanitize_attribute("1!2@3#4$5%6^7&8*9(0)_-"), "_2_3_4_5_6_7_8_9_0_", ) self.assertEqual(sanitize_attribute("foo,./?;:[]{}bar"), "foo_bar") self.assertEqual(sanitize_attribute("TestString"), "TestString") self.assertEqual(sanitize_attribute("aAbBcC_12_oi"), "aAbBcC_12_oi") def test_map_unit(self): # select hardcoded mappings self.assertEqual(map_unit("s"), "seconds") self.assertEqual(map_unit("By"), "bytes") self.assertEqual(map_unit("m"), "meters") # should work with UCUM annotations as well self.assertEqual(map_unit("g{dogfood}"), "grams") # UCUM "default unit" aka unity and equivalent UCUM annotations should be stripped self.assertEqual(map_unit("1"), "") self.assertEqual(map_unit("{}"), "") self.assertEqual(map_unit("{request}"), "") self.assertEqual(map_unit("{{{;@#$}}}"), "") self.assertEqual(map_unit("{unit with space}"), "") # conversion of per units self.assertEqual(map_unit("km/h"), "km_per_hour") self.assertEqual(map_unit("m/s"), "meters_per_second") self.assertEqual(map_unit("{foo}/s"), "per_second") self.assertEqual(map_unit("foo/bar"), "foo_per_bar") self.assertEqual(map_unit("2fer/store"), "2fer_per_store") # should be sanitized to become part of the metric name without surrounding "_" self.assertEqual(map_unit("____"), "") self.assertEqual(map_unit("1:foo#@!"), "1:foo") # should not be interpreted as a per unit since there is no denominator self.assertEqual(map_unit("m/"), "m") self.assertEqual(map_unit("m/{bar}"), "m")
test_prometheus_exporter.py000066400000000000000000000634411511654350100354510ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-prometheus/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 
(the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from textwrap import dedent from unittest import TestCase from unittest.mock import Mock, patch from prometheus_client import generate_latest from prometheus_client.core import ( CounterMetricFamily, GaugeMetricFamily, InfoMetricFamily, ) from opentelemetry.exporter.prometheus import ( PrometheusMetricReader, _CustomCollector, ) from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Histogram, HistogramDataPoint, Metric, MetricsData, ResourceMetrics, ScopeMetrics, ) from opentelemetry.sdk.resources import Resource from opentelemetry.test.metrictestutil import ( _generate_gauge, _generate_histogram, _generate_sum, _generate_unsupported_metric, ) class TestPrometheusMetricReader(TestCase): def setUp(self): self._mock_registry_register = Mock() self._registry_register_patch = patch( "prometheus_client.core.REGISTRY.register", side_effect=self._mock_registry_register, ) def verify_text_format( self, metric: Metric, expect_prometheus_text: str ) -> None: metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Mock(), scope_metrics=[ ScopeMetrics( scope=Mock(), metrics=[metric], schema_url="schema_url", ) ], schema_url="schema_url", ) ] ) collector = _CustomCollector(disable_target_info=True) collector.add_metrics_data(metrics_data) result_bytes = generate_latest(collector) result = result_bytes.decode("utf-8") self.assertEqual(result, expect_prometheus_text) # pylint: disable=protected-access def test_constructor(self): """Test the constructor.""" with self._registry_register_patch: _ = PrometheusMetricReader() self.assertTrue(self._mock_registry_register.called) def test_shutdown(self): with patch( "prometheus_client.core.REGISTRY.unregister" ) as registry_unregister_patch: exporter = PrometheusMetricReader() exporter.shutdown() self.assertTrue(registry_unregister_patch.called) def test_histogram_to_prometheus(self): metric = Metric( name="test@name", description="foo", unit="s", data=Histogram( data_points=[ HistogramDataPoint( attributes={"histo": 1}, start_time_unix_nano=1641946016139533244, time_unix_nano=1641946016139533244, count=6, sum=579.0, bucket_counts=[1, 3, 2], explicit_bounds=[123.0, 456.0], min=1, max=457, ) ], aggregation_temporality=AggregationTemporality.DELTA, ), ) self.verify_text_format( metric, dedent( """\ # HELP test_name_seconds foo # TYPE test_name_seconds histogram test_name_seconds_bucket{histo="1",le="123.0"} 1.0 test_name_seconds_bucket{histo="1",le="456.0"} 4.0 test_name_seconds_bucket{histo="1",le="+Inf"} 6.0 test_name_seconds_count{histo="1"} 6.0 test_name_seconds_sum{histo="1"} 579.0 """ ), ) def test_monotonic_sum_to_prometheus(self): labels = {"environment@": "staging", "os": "Windows"} metric = _generate_sum( "test@sum_monotonic", 123, attributes=labels, description="testdesc", unit="testunit", ) metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Mock(), scope_metrics=[ ScopeMetrics( scope=Mock(), metrics=[metric], schema_url="schema_url", ) ], schema_url="schema_url", ) 
] ) collector = _CustomCollector(disable_target_info=True) collector.add_metrics_data(metrics_data) for prometheus_metric in collector.collect(): self.assertEqual(type(prometheus_metric), CounterMetricFamily) self.assertEqual( prometheus_metric.name, "test_sum_monotonic_testunit" ) self.assertEqual(prometheus_metric.documentation, "testdesc") self.assertTrue(len(prometheus_metric.samples) == 1) self.assertEqual(prometheus_metric.samples[0].value, 123) self.assertTrue(len(prometheus_metric.samples[0].labels) == 2) self.assertEqual( prometheus_metric.samples[0].labels["environment_"], "staging" ) self.assertEqual( prometheus_metric.samples[0].labels["os"], "Windows" ) def test_non_monotonic_sum_to_prometheus(self): labels = {"environment@": "staging", "os": "Windows"} metric = _generate_sum( "test@sum_nonmonotonic", 123, attributes=labels, description="testdesc", unit="testunit", is_monotonic=False, ) metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Mock(), scope_metrics=[ ScopeMetrics( scope=Mock(), metrics=[metric], schema_url="schema_url", ) ], schema_url="schema_url", ) ] ) collector = _CustomCollector(disable_target_info=True) collector.add_metrics_data(metrics_data) for prometheus_metric in collector.collect(): self.assertEqual(type(prometheus_metric), GaugeMetricFamily) self.assertEqual( prometheus_metric.name, "test_sum_nonmonotonic_testunit" ) self.assertEqual(prometheus_metric.documentation, "testdesc") self.assertTrue(len(prometheus_metric.samples) == 1) self.assertEqual(prometheus_metric.samples[0].value, 123) self.assertTrue(len(prometheus_metric.samples[0].labels) == 2) self.assertEqual( prometheus_metric.samples[0].labels["environment_"], "staging" ) self.assertEqual( prometheus_metric.samples[0].labels["os"], "Windows" ) def test_gauge_to_prometheus(self): labels = {"environment@": "dev", "os": "Unix"} metric = _generate_gauge( "test@gauge", 123, attributes=labels, description="testdesc", unit="testunit", ) metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Mock(), scope_metrics=[ ScopeMetrics( scope=Mock(), metrics=[metric], schema_url="schema_url", ) ], schema_url="schema_url", ) ] ) collector = _CustomCollector(disable_target_info=True) collector.add_metrics_data(metrics_data) for prometheus_metric in collector.collect(): self.assertEqual(type(prometheus_metric), GaugeMetricFamily) self.assertEqual(prometheus_metric.name, "test_gauge_testunit") self.assertEqual(prometheus_metric.documentation, "testdesc") self.assertTrue(len(prometheus_metric.samples) == 1) self.assertEqual(prometheus_metric.samples[0].value, 123) self.assertTrue(len(prometheus_metric.samples[0].labels) == 2) self.assertEqual( prometheus_metric.samples[0].labels["environment_"], "dev" ) self.assertEqual(prometheus_metric.samples[0].labels["os"], "Unix") def test_invalid_metric(self): labels = {"environment": "staging"} record = _generate_unsupported_metric( "tesname", attributes=labels, description="testdesc", unit="testunit", ) collector = _CustomCollector() collector.add_metrics_data([record]) collector.collect() self.assertLogs("opentelemetry.exporter.prometheus", level="WARNING") def test_list_labels(self): labels = {"environment@": ["1", "2", "3"], "os": "Unix"} metric = _generate_gauge( "test@gauge", 123, attributes=labels, description="testdesc", unit="testunit", ) metrics_data = MetricsData( resource_metrics=[ ResourceMetrics( resource=Mock(), scope_metrics=[ ScopeMetrics( scope=Mock(), metrics=[metric], schema_url="schema_url", ) ], 
schema_url="schema_url", ) ] ) collector = _CustomCollector(disable_target_info=True) collector.add_metrics_data(metrics_data) for prometheus_metric in collector.collect(): self.assertEqual(type(prometheus_metric), GaugeMetricFamily) self.assertEqual(prometheus_metric.name, "test_gauge_testunit") self.assertEqual(prometheus_metric.documentation, "testdesc") self.assertTrue(len(prometheus_metric.samples) == 1) self.assertEqual(prometheus_metric.samples[0].value, 123) self.assertTrue(len(prometheus_metric.samples[0].labels) == 2) self.assertEqual( prometheus_metric.samples[0].labels["environment_"], '["1", "2", "3"]', ) self.assertEqual(prometheus_metric.samples[0].labels["os"], "Unix") def test_check_value(self): collector = _CustomCollector() self.assertEqual(collector._check_value(1), "1") self.assertEqual(collector._check_value(1.0), "1.0") self.assertEqual(collector._check_value("a"), "a") self.assertEqual(collector._check_value([1, 2]), "[1, 2]") self.assertEqual(collector._check_value((1, 2)), "[1, 2]") self.assertEqual(collector._check_value(["a", 2]), '["a", 2]') self.assertEqual(collector._check_value(True), "true") self.assertEqual(collector._check_value(False), "false") self.assertEqual(collector._check_value(None), "null") def test_multiple_collection_calls(self): metric_reader = PrometheusMetricReader() provider = MeterProvider(metric_readers=[metric_reader]) meter = provider.get_meter("getting-started", "0.1.2") counter = meter.create_counter("counter") counter.add(1) result_0 = list(metric_reader._collector.collect()) result_1 = list(metric_reader._collector.collect()) result_2 = list(metric_reader._collector.collect()) self.assertEqual(result_0, result_1) self.assertEqual(result_1, result_2) def test_target_info_enabled_by_default(self): metric_reader = PrometheusMetricReader() provider = MeterProvider( metric_readers=[metric_reader], resource=Resource({"os": "Unix", "version": "1.2.3"}), ) meter = provider.get_meter("getting-started", "0.1.2") counter = meter.create_counter("counter") counter.add(1) result = list(metric_reader._collector.collect()) self.assertEqual(len(result), 2) prometheus_metric = result[0] self.assertEqual(type(prometheus_metric), InfoMetricFamily) self.assertEqual(prometheus_metric.name, "target") self.assertEqual(prometheus_metric.documentation, "Target metadata") self.assertTrue(len(prometheus_metric.samples) == 1) self.assertEqual(prometheus_metric.samples[0].value, 1) self.assertTrue(len(prometheus_metric.samples[0].labels) == 2) self.assertEqual(prometheus_metric.samples[0].labels["os"], "Unix") self.assertEqual( prometheus_metric.samples[0].labels["version"], "1.2.3" ) def test_target_info_disabled(self): metric_reader = PrometheusMetricReader(disable_target_info=True) provider = MeterProvider( metric_readers=[metric_reader], resource=Resource({"os": "Unix", "version": "1.2.3"}), ) meter = provider.get_meter("getting-started", "0.1.2") counter = meter.create_counter("counter") counter.add(1) result = list(metric_reader._collector.collect()) for prometheus_metric in result: self.assertNotEqual(type(prometheus_metric), InfoMetricFamily) self.assertNotEqual(prometheus_metric.name, "target") self.assertNotEqual( prometheus_metric.documentation, "Target metadata" ) self.assertNotIn("os", prometheus_metric.samples[0].labels) self.assertNotIn("version", prometheus_metric.samples[0].labels) def test_target_info_sanitize(self): metric_reader = PrometheusMetricReader() provider = MeterProvider( metric_readers=[metric_reader], resource=Resource( { 
"system.os": "Unix", "system.name": "Prometheus Target Sanitize", "histo": 1, "ratio": 0.1, } ), ) meter = provider.get_meter("getting-started", "0.1.2") counter = meter.create_counter("counter") counter.add(1) prometheus_metric = list(metric_reader._collector.collect())[0] self.assertEqual(type(prometheus_metric), InfoMetricFamily) self.assertEqual(prometheus_metric.name, "target") self.assertEqual(prometheus_metric.documentation, "Target metadata") self.assertTrue(len(prometheus_metric.samples) == 1) self.assertEqual(prometheus_metric.samples[0].value, 1) self.assertTrue(len(prometheus_metric.samples[0].labels) == 4) self.assertTrue("system_os" in prometheus_metric.samples[0].labels) self.assertEqual( prometheus_metric.samples[0].labels["system_os"], "Unix" ) self.assertTrue("system_name" in prometheus_metric.samples[0].labels) self.assertEqual( prometheus_metric.samples[0].labels["system_name"], "Prometheus Target Sanitize", ) self.assertTrue("histo" in prometheus_metric.samples[0].labels) self.assertEqual( prometheus_metric.samples[0].labels["histo"], "1", ) self.assertTrue("ratio" in prometheus_metric.samples[0].labels) self.assertEqual( prometheus_metric.samples[0].labels["ratio"], "0.1", ) def test_label_order_does_not_matter(self): metric_reader = PrometheusMetricReader() provider = MeterProvider(metric_readers=[metric_reader]) meter = provider.get_meter("getting-started", "0.1.2") counter = meter.create_counter("counter") counter.add(1, {"cause": "cause1", "reason": "reason1"}) counter.add(1, {"reason": "reason2", "cause": "cause2"}) prometheus_output = generate_latest().decode() # All labels are mapped correctly self.assertIn('cause="cause1"', prometheus_output) self.assertIn('cause="cause2"', prometheus_output) self.assertIn('reason="reason1"', prometheus_output) self.assertIn('reason="reason2"', prometheus_output) # Only one metric is generated metric_count = prometheus_output.count("# HELP counter_total") self.assertEqual(metric_count, 1) def test_metric_name(self): self.verify_text_format( _generate_sum(name="test_counter", value=1, unit=""), dedent( """\ # HELP test_counter_total foo # TYPE test_counter_total counter test_counter_total{a="1",b="true"} 1.0 """ ), ) self.verify_text_format( _generate_sum(name="1leading_digit", value=1, unit=""), dedent( """\ # HELP _leading_digit_total foo # TYPE _leading_digit_total counter _leading_digit_total{a="1",b="true"} 1.0 """ ), ) self.verify_text_format( _generate_sum(name="!@#counter_invalid_chars", value=1, unit=""), dedent( """\ # HELP _counter_invalid_chars_total foo # TYPE _counter_invalid_chars_total counter _counter_invalid_chars_total{a="1",b="true"} 1.0 """ ), ) def test_metric_name_with_unit(self): self.verify_text_format( _generate_gauge(name="test.metric.no_unit", value=1, unit=""), dedent( """\ # HELP test_metric_no_unit foo # TYPE test_metric_no_unit gauge test_metric_no_unit{a="1",b="true"} 1.0 """ ), ) self.verify_text_format( _generate_gauge( name="test.metric.spaces", value=1, unit=" \t " ), dedent( """\ # HELP test_metric_spaces foo # TYPE test_metric_spaces gauge test_metric_spaces{a="1",b="true"} 1.0 """ ), ) # UCUM annotations should be stripped self.verify_text_format( _generate_sum(name="test_counter", value=1, unit="{requests}"), dedent( """\ # HELP test_counter_total foo # TYPE test_counter_total counter test_counter_total{a="1",b="true"} 1.0 """ ), ) # slash converts to "per" self.verify_text_format( _generate_gauge(name="test_gauge", value=1, unit="m/s"), dedent( """\ # HELP test_gauge_meters_per_second 
foo # TYPE test_gauge_meters_per_second gauge test_gauge_meters_per_second{a="1",b="true"} 1.0 """ ), ) # invalid characters in name are sanitized before being passed to prom client, which # would throw errors self.verify_text_format( _generate_sum(name="test_counter", value=1, unit="%{foo}@?"), dedent( """\ # HELP test_counter_total foo # TYPE test_counter_total counter test_counter_total{a="1",b="true"} 1.0 """ ), ) def test_semconv(self): """Tests that a few select semconv metrics get converted to the expected prometheus text format""" self.verify_text_format( _generate_sum( name="system.filesystem.usage", value=1, is_monotonic=False, unit="By", ), dedent( """\ # HELP system_filesystem_usage_bytes foo # TYPE system_filesystem_usage_bytes gauge system_filesystem_usage_bytes{a="1",b="true"} 1.0 """ ), ) self.verify_text_format( _generate_sum( name="system.network.dropped", value=1, unit="{packets}", ), dedent( """\ # HELP system_network_dropped_total foo # TYPE system_network_dropped_total counter system_network_dropped_total{a="1",b="true"} 1.0 """ ), ) self.verify_text_format( _generate_histogram( name="http.server.request.duration", unit="s", ), dedent( """\ # HELP http_server_request_duration_seconds foo # TYPE http_server_request_duration_seconds histogram http_server_request_duration_seconds_bucket{a="1",b="true",le="123.0"} 1.0 http_server_request_duration_seconds_bucket{a="1",b="true",le="456.0"} 4.0 http_server_request_duration_seconds_bucket{a="1",b="true",le="+Inf"} 6.0 http_server_request_duration_seconds_count{a="1",b="true"} 6.0 http_server_request_duration_seconds_sum{a="1",b="true"} 579.0 """ ), ) self.verify_text_format( _generate_sum( name="http.server.active_requests", value=1, unit="{request}", is_monotonic=False, ), dedent( """\ # HELP http_server_active_requests foo # TYPE http_server_active_requests gauge http_server_active_requests{a="1",b="true"} 1.0 """ ), ) # if the metric name already contains the unit, it shouldn't be added again self.verify_text_format( _generate_sum( name="metric_name_with_myunit", value=1, unit="myunit", ), dedent( """\ # HELP metric_name_with_myunit_total foo # TYPE metric_name_with_myunit_total counter metric_name_with_myunit_total{a="1",b="true"} 1.0 """ ), ) self.verify_text_format( _generate_gauge( name="metric_name_percent", value=1, unit="%", ), dedent( """\ # HELP metric_name_percent foo # TYPE metric_name_percent gauge metric_name_percent{a="1",b="true"} 1.0 """ ), ) def test_multiple_data_points_with_different_label_sets(self): hist_point_1 = HistogramDataPoint( attributes={"http_target": "/foobar", "net_host_port": 8080}, start_time_unix_nano=1641946016139533244, time_unix_nano=1641946016139533244, count=6, sum=579.0, bucket_counts=[1, 3, 2], explicit_bounds=[123.0, 456.0], min=1, max=457, ) hist_point_2 = HistogramDataPoint( attributes={"net_host_port": 8080}, start_time_unix_nano=1641946016139533245, time_unix_nano=1641946016139533245, count=7, sum=579.0, bucket_counts=[1, 3, 3], explicit_bounds=[123.0, 456.0], min=1, max=457, ) metric = Metric( name="http.server.request.duration", description="test multiple label sets", unit="s", data=Histogram( data_points=[hist_point_1, hist_point_2], aggregation_temporality=AggregationTemporality.CUMULATIVE, ), ) self.verify_text_format( metric, dedent( """\ # HELP http_server_request_duration_seconds test multiple label sets # TYPE http_server_request_duration_seconds histogram http_server_request_duration_seconds_bucket{http_target="/foobar",le="123.0",net_host_port="8080"} 1.0 
http_server_request_duration_seconds_bucket{http_target="/foobar",le="456.0",net_host_port="8080"} 4.0 http_server_request_duration_seconds_bucket{http_target="/foobar",le="+Inf",net_host_port="8080"} 6.0 http_server_request_duration_seconds_count{http_target="/foobar",net_host_port="8080"} 6.0 http_server_request_duration_seconds_sum{http_target="/foobar",net_host_port="8080"} 579.0 # HELP http_server_request_duration_seconds test multiple label sets # TYPE http_server_request_duration_seconds histogram http_server_request_duration_seconds_bucket{le="123.0",net_host_port="8080"} 1.0 http_server_request_duration_seconds_bucket{le="456.0",net_host_port="8080"} 4.0 http_server_request_duration_seconds_bucket{le="+Inf",net_host_port="8080"} 7.0 http_server_request_duration_seconds_count{net_host_port="8080"} 7.0 http_server_request_duration_seconds_sum{net_host_port="8080"} 579.0 """ ), ) python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/000077500000000000000000000000001511654350100267045ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/CHANGELOG.md000066400000000000000000000000001511654350100305030ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/LICENSE000066400000000000000000000261351511654350100277200ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/README.rst000066400000000000000000000012221511654350100303700ustar00rootroot00000000000000OpenTelemetry Zipkin JSON Exporter ================================== |pypi| .. |pypi| image:: https://badge.fury.io/py/opentelemetry-exporter-zipkin-json.svg :target: https://pypi.org/project/opentelemetry-exporter-zipkin-json/ This library allows export of tracing data to `Zipkin `_ using JSON for serialization. 
Installation ------------ :: pip install opentelemetry-exporter-zipkin-json References ---------- * `OpenTelemetry Zipkin Exporter <https://opentelemetry-python.readthedocs.io/en/latest/exporter/zipkin/zipkin.html>`_ * `Zipkin <https://zipkin.io/>`_ * `OpenTelemetry Project <https://opentelemetry.io/>`_
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/pyproject.toml000066400000000000000000000030111511654350100316130ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-exporter-zipkin-json" dynamic = ["version"] description = "Zipkin Span JSON Exporter for OpenTelemetry" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Framework :: OpenTelemetry :: Exporters", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "opentelemetry-api ~= 1.3", "opentelemetry-sdk ~= 1.11", "requests ~= 2.7", ] [project.entry-points.opentelemetry_traces_exporter] zipkin_json = "opentelemetry.exporter.zipkin.json:ZipkinExporter" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-zipkin-json" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/exporter/zipkin/json/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"]
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/000077500000000000000000000000001511654350100274735ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/000077500000000000000000000000001511654350100323675ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/000077500000000000000000000000001511654350100342375ustar00rootroot00000000000000zipkin/000077500000000000000000000000001511654350100354645ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporterencoder/000077500000000000000000000000001511654350100371035ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin__init__.py000066400000000000000000000233331511654350100412170ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/encoder
"""Zipkin Exporter Transport Encoder Base module and abstract class for concrete transport encoders to extend. """ import abc import json import logging from enum import Enum from typing import Any, Dict, List, Optional, Sequence, TypeVar from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint from opentelemetry.sdk.trace import Event from opentelemetry.trace import ( Span, SpanContext, StatusCode, format_span_id, format_trace_id, ) EncodedLocalEndpointT = TypeVar("EncodedLocalEndpointT") DEFAULT_MAX_TAG_VALUE_LENGTH = 128 NAME_KEY = "otel.library.name" VERSION_KEY = "otel.library.version" _SCOPE_NAME_KEY = "otel.scope.name" _SCOPE_VERSION_KEY = "otel.scope.version" logger = logging.getLogger(__name__) class Protocol(Enum): """Enum of supported protocol formats. Values are human-readable strings so that they can be easily used by the OS environ var OTEL_EXPORTER_ZIPKIN_PROTOCOL (reserved for future usage). """ V1 = "v1" V2 = "v2" # pylint: disable=W0223 class Encoder(abc.ABC): """Base class for encoders that are used by the exporter. Args: max_tag_value_length: maximum length of an exported tag value. Values will be truncated to conform. Since values are serialized to a JSON list string, max_tag_value_length is honored at the element boundary. """ def __init__( self, max_tag_value_length: int = DEFAULT_MAX_TAG_VALUE_LENGTH ): self.max_tag_value_length = max_tag_value_length @staticmethod @abc.abstractmethod def content_type() -> str: pass @abc.abstractmethod def serialize( self, spans: Sequence[Span], local_endpoint: NodeEndpoint ) -> str: pass @abc.abstractmethod def _encode_span( self, span: Span, encoded_local_endpoint: EncodedLocalEndpointT ) -> Any: """ Per spec Zipkin fields that can be absent SHOULD be omitted from the payload when they are empty in the OpenTelemetry Span. 
https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/sdk_exporters/zipkin.md#request-payload """ @staticmethod @abc.abstractmethod def _encode_local_endpoint( local_endpoint: NodeEndpoint, ) -> EncodedLocalEndpointT: pass @staticmethod def _encode_debug(span_context) -> Any: return span_context.trace_flags.sampled @staticmethod @abc.abstractmethod def _encode_span_id(span_id: int) -> Any: pass @staticmethod @abc.abstractmethod def _encode_trace_id(trace_id: int) -> Any: pass @staticmethod def _get_parent_id(span_context) -> Optional[int]: if isinstance(span_context, Span): parent_id = span_context.parent.span_id elif isinstance(span_context, SpanContext): parent_id = span_context.span_id else: parent_id = None return parent_id def _extract_tags_from_dict( self, tags_dict: Optional[Dict] ) -> Dict[str, str]: tags = {} if not tags_dict: return tags for attribute_key, attribute_value in tags_dict.items(): if isinstance(attribute_value, bool): value = str(attribute_value).lower() elif isinstance(attribute_value, (int, float, str)): value = str(attribute_value) elif isinstance(attribute_value, Sequence): value = self._extract_tag_value_string_from_sequence( attribute_value ) if not value: logger.warning("Could not serialize tag %s", attribute_key) continue else: logger.warning("Could not serialize tag %s", attribute_key) continue if ( self.max_tag_value_length is not None and self.max_tag_value_length > 0 ): value = value[: self.max_tag_value_length] tags[attribute_key] = value return tags def _extract_tag_value_string_from_sequence(self, sequence: Sequence): if self.max_tag_value_length and self.max_tag_value_length == 1: return None tag_value_elements = [] running_string_length = ( 2 # accounts for array brackets in output string ) defined_max_tag_value_length = ( self.max_tag_value_length is not None and self.max_tag_value_length > 0 ) for element in sequence: if isinstance(element, bool): tag_value_element = str(element).lower() elif isinstance(element, (int, float, str)): tag_value_element = str(element) elif element is None: tag_value_element = None else: continue if defined_max_tag_value_length: if tag_value_element is None: running_string_length += 4 # null with no quotes else: # + 2 accounts for string quotation marks running_string_length += len(tag_value_element) + 2 if tag_value_elements: # accounts for ',' item separator running_string_length += 1 if running_string_length > self.max_tag_value_length: break tag_value_elements.append(tag_value_element) return json.dumps(tag_value_elements, separators=(",", ":")) def _extract_tags_from_span(self, span: Span) -> Dict[str, str]: tags = self._extract_tags_from_dict(span.attributes) if span.resource: tags.update(self._extract_tags_from_dict(span.resource.attributes)) if span.instrumentation_scope is not None: tags.update( { NAME_KEY: span.instrumentation_scope.name, VERSION_KEY: span.instrumentation_scope.version, _SCOPE_NAME_KEY: span.instrumentation_scope.name, _SCOPE_VERSION_KEY: span.instrumentation_scope.version, } ) if span.status.status_code is not StatusCode.UNSET: tags.update({"otel.status_code": span.status.status_code.name}) if span.status.status_code is StatusCode.ERROR: tags.update({"error": span.status.description or ""}) if span.dropped_attributes: tags.update( {"otel.dropped_attributes_count": str(span.dropped_attributes)} ) if span.dropped_events: tags.update( {"otel.dropped_events_count": str(span.dropped_events)} ) if span.dropped_links: tags.update({"otel.dropped_links_count": 
str(span.dropped_links)}) return tags def _extract_annotations_from_events( self, events: Optional[List[Event]] ) -> Optional[List[Dict]]: if not events: return None annotations = [] for event in events: attrs = {} for key, value in event.attributes.items(): if ( isinstance(value, str) and self.max_tag_value_length is not None and self.max_tag_value_length > 0 ): value = value[: self.max_tag_value_length] attrs[key] = value annotations.append( { "timestamp": self._nsec_to_usec_round(event.timestamp), "value": json.dumps({event.name: attrs}, sort_keys=True), } ) return annotations @staticmethod def _nsec_to_usec_round(nsec: int) -> int: """Round nanoseconds to microseconds Timestamp in zipkin spans is int of microseconds. See: https://zipkin.io/pages/instrumenting.html """ return (nsec + 500) // 10**3 class JsonEncoder(Encoder): @staticmethod def content_type(): return "application/json" def serialize( self, spans: Sequence[Span], local_endpoint: NodeEndpoint ) -> str: encoded_local_endpoint = self._encode_local_endpoint(local_endpoint) encoded_spans = [] for span in spans: encoded_spans.append( self._encode_span(span, encoded_local_endpoint) ) return json.dumps(encoded_spans) @staticmethod def _encode_local_endpoint(local_endpoint: NodeEndpoint) -> Dict: encoded_local_endpoint = {"serviceName": local_endpoint.service_name} if local_endpoint.ipv4 is not None: encoded_local_endpoint["ipv4"] = str(local_endpoint.ipv4) if local_endpoint.ipv6 is not None: encoded_local_endpoint["ipv6"] = str(local_endpoint.ipv6) if local_endpoint.port is not None: encoded_local_endpoint["port"] = local_endpoint.port return encoded_local_endpoint @staticmethod def _encode_span_id(span_id: int) -> str: return format_span_id(span_id) @staticmethod def _encode_trace_id(trace_id: int) -> str: return format_trace_id(trace_id) json/000077500000000000000000000000001511654350100364355ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin__init__.py000066400000000000000000000155231511654350100405540ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/json# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenTelemetry Zipkin JSON Exporter ---------------------------------- This library allows exporting tracing data to `Zipkin <https://zipkin.io/>`_. Usage ----- The **OpenTelemetry Zipkin JSON Exporter** exports `OpenTelemetry`_ traces to `Zipkin`_. This exporter sends traces to the configured Zipkin collector endpoint using JSON over HTTP and supports multiple versions (v1, v2). .. _Zipkin: https://zipkin.io/ .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ .. _Specification: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#zipkin-exporter .. 
code:: python import requests from opentelemetry import trace from opentelemetry.exporter.zipkin.json import ZipkinExporter from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor trace.set_tracer_provider(TracerProvider()) tracer = trace.get_tracer(__name__) # create a ZipkinExporter zipkin_exporter = ZipkinExporter( # version=Protocol.V2 # optional: # endpoint="http://localhost:9411/api/v2/spans", # local_node_ipv4="192.168.0.1", # local_node_ipv6="2001:db8::c001", # local_node_port=31313, # max_tag_value_length=256, # timeout=5 (in seconds), # session=requests.Session(), ) # Create a BatchSpanProcessor and add the exporter to it span_processor = BatchSpanProcessor(zipkin_exporter) # add to the tracer trace.get_tracer_provider().add_span_processor(span_processor) with tracer.start_as_current_span("foo"): print("Hello world!") The exporter supports the following environment variables for configuration: - :envvar:`OTEL_EXPORTER_ZIPKIN_ENDPOINT` - :envvar:`OTEL_EXPORTER_ZIPKIN_TIMEOUT` API --- """ import logging from os import environ from typing import Optional, Sequence import requests from opentelemetry.exporter.zipkin.encoder import Protocol from opentelemetry.exporter.zipkin.json.v1 import JsonV1Encoder from opentelemetry.exporter.zipkin.json.v2 import JsonV2Encoder from opentelemetry.exporter.zipkin.node_endpoint import IpInput, NodeEndpoint from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_ZIPKIN_ENDPOINT, OTEL_EXPORTER_ZIPKIN_TIMEOUT, ) from opentelemetry.sdk.resources import SERVICE_NAME from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult from opentelemetry.trace import Span DEFAULT_ENDPOINT = "http://localhost:9411/api/v2/spans" REQUESTS_SUCCESS_STATUS_CODES = (200, 202) logger = logging.getLogger(__name__) class ZipkinExporter(SpanExporter): def __init__( self, version: Protocol = Protocol.V2, endpoint: Optional[str] = None, local_node_ipv4: IpInput = None, local_node_ipv6: IpInput = None, local_node_port: Optional[int] = None, max_tag_value_length: Optional[int] = None, timeout: Optional[int] = None, session: Optional[requests.Session] = None, ): """Zipkin exporter. Args: version: The protocol version to be used. endpoint: The endpoint of the Zipkin collector. local_node_ipv4: Primary IPv4 address associated with this connection. local_node_ipv6: Primary IPv6 address associated with this connection. local_node_port: Depending on context, this could be a listen port or the client-side of a socket. max_tag_value_length: Maximum length string attribute values can have. timeout: Maximum time the Zipkin exporter will wait for each batch export. The default value is 10s. session: Connection session to the Zipkin collector endpoint. The tuple (local_node_ipv4, local_node_ipv6, local_node_port) is used to represent the network context of a node in the service graph. 
""" self.local_node = NodeEndpoint( local_node_ipv4, local_node_ipv6, local_node_port ) if endpoint is None: endpoint = ( environ.get(OTEL_EXPORTER_ZIPKIN_ENDPOINT) or DEFAULT_ENDPOINT ) self.endpoint = endpoint if version == Protocol.V1: self.encoder = JsonV1Encoder(max_tag_value_length) elif version == Protocol.V2: self.encoder = JsonV2Encoder(max_tag_value_length) self.session = session or requests.Session() self.session.headers.update( {"Content-Type": self.encoder.content_type()} ) self._closed = False self.timeout = timeout or int( environ.get(OTEL_EXPORTER_ZIPKIN_TIMEOUT, 10) ) def export(self, spans: Sequence[Span]) -> SpanExportResult: # After the call to Shutdown subsequent calls to Export are # not allowed and should return a Failure result if self._closed: logger.warning("Exporter already shutdown, ignoring batch") return SpanExportResult.FAILURE # Populate service_name from first span # We restrict any SpanProcessor to be only associated with a single # TracerProvider, so it is safe to assume that all Spans in a single # batch all originate from one TracerProvider (and in turn have all # the same service.name) if spans: service_name = spans[0].resource.attributes.get(SERVICE_NAME) if service_name: self.local_node.service_name = service_name result = self.session.post( url=self.endpoint, data=self.encoder.serialize(spans, self.local_node), timeout=self.timeout, ) if result.status_code not in REQUESTS_SUCCESS_STATUS_CODES: logger.error( "Traces cannot be uploaded; status code: %s, message %s", result.status_code, result.text, ) return SpanExportResult.FAILURE return SpanExportResult.SUCCESS def shutdown(self) -> None: if self._closed: logger.warning("Exporter already shutdown, ignoring call") return self.session.close() self._closed = True def force_flush(self, timeout_millis: int = 30000) -> bool: return True v1/000077500000000000000000000000001511654350100367635ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/json__init__.py000066400000000000000000000056061511654350100411030ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/json/v1# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Zipkin Export Encoders for JSON formats""" from typing import Dict, List from opentelemetry.exporter.zipkin.encoder import Encoder, JsonEncoder from opentelemetry.trace import Span # pylint: disable=W0223 class V1Encoder(Encoder): def _extract_binary_annotations( self, span: Span, encoded_local_endpoint: Dict ) -> List[Dict]: binary_annotations = [] for tag_key, tag_value in self._extract_tags_from_span(span).items(): if isinstance(tag_value, str) and self.max_tag_value_length > 0: tag_value = tag_value[: self.max_tag_value_length] binary_annotations.append( { "key": tag_key, "value": tag_value, "endpoint": encoded_local_endpoint, } ) return binary_annotations class JsonV1Encoder(JsonEncoder, V1Encoder): """Zipkin Export Encoder for JSON v1 API API spec: https://github.com/openzipkin/zipkin-api/blob/master/zipkin-api.yaml """ def _encode_span(self, span: Span, encoded_local_endpoint: Dict) -> Dict: context = span.get_span_context() encoded_span = { "traceId": self._encode_trace_id(context.trace_id), "id": self._encode_span_id(context.span_id), "name": span.name, "timestamp": self._nsec_to_usec_round(span.start_time), "duration": self._nsec_to_usec_round( span.end_time - span.start_time ), } encoded_annotations = self._extract_annotations_from_events( span.events ) if encoded_annotations is not None: for annotation in encoded_annotations: annotation["endpoint"] = encoded_local_endpoint encoded_span["annotations"] = encoded_annotations binary_annotations = self._extract_binary_annotations( span, encoded_local_endpoint ) if binary_annotations: encoded_span["binaryAnnotations"] = binary_annotations debug = self._encode_debug(context) if debug: encoded_span["debug"] = debug parent_id = self._get_parent_id(span.parent) if parent_id is not None: encoded_span["parentId"] = self._encode_span_id(parent_id) return encoded_span v2/000077500000000000000000000000001511654350100367645ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/json__init__.py000066400000000000000000000043701511654350100411010ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/json/v2# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Zipkin Export Encoders for JSON formats""" from typing import Dict from opentelemetry.exporter.zipkin.encoder import JsonEncoder from opentelemetry.trace import Span, SpanKind class JsonV2Encoder(JsonEncoder): """Zipkin Export Encoder for JSON v2 API API spec: https://github.com/openzipkin/zipkin-api/blob/master/zipkin2-api.yaml """ SPAN_KIND_MAP = { SpanKind.INTERNAL: None, SpanKind.SERVER: "SERVER", SpanKind.CLIENT: "CLIENT", SpanKind.PRODUCER: "PRODUCER", SpanKind.CONSUMER: "CONSUMER", } def _encode_span(self, span: Span, encoded_local_endpoint: Dict) -> Dict: context = span.get_span_context() encoded_span = { "traceId": self._encode_trace_id(context.trace_id), "id": self._encode_span_id(context.span_id), "name": span.name, "timestamp": self._nsec_to_usec_round(span.start_time), "duration": self._nsec_to_usec_round( span.end_time - span.start_time ), "localEndpoint": encoded_local_endpoint, "kind": self.SPAN_KIND_MAP[span.kind], } tags = self._extract_tags_from_span(span) if tags: encoded_span["tags"] = tags annotations = self._extract_annotations_from_events(span.events) if annotations: encoded_span["annotations"] = annotations debug = self._encode_debug(context) if debug: encoded_span["debug"] = debug parent_id = self._get_parent_id(span.parent) if parent_id is not None: encoded_span["parentId"] = self._encode_span_id(parent_id) return encoded_span version/000077500000000000000000000000001511654350100401225ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/json__init__.py000066400000000000000000000011401511654350100422270ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin/json/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "1.39.1" node_endpoint.py000066400000000000000000000051701511654350100406660ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkin# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Zipkin Exporter Endpoints""" import ipaddress from typing import Optional, Union from opentelemetry import trace from opentelemetry.sdk.resources import SERVICE_NAME, Resource IpInput = Union[str, int, None] class NodeEndpoint: """The network context of a node in the service graph. Args: ipv4: Primary IPv4 address associated with this connection. 
ipv6: Primary IPv6 address associated with this connection. port: Depending on context, this could be a listen port or the client-side of a socket. None if unknown. """ def __init__( self, ipv4: IpInput = None, ipv6: IpInput = None, port: Optional[int] = None, ): self.ipv4 = ipv4 self.ipv6 = ipv6 self.port = port tracer_provider = trace.get_tracer_provider() if hasattr(tracer_provider, "resource"): resource = tracer_provider.resource else: resource = Resource.create() self.service_name = resource.attributes[SERVICE_NAME] @property def ipv4(self) -> Optional[ipaddress.IPv4Address]: return self._ipv4 @ipv4.setter def ipv4(self, address: IpInput) -> None: if address is None: self._ipv4 = None else: ipv4_address = ipaddress.ip_address(address) if not isinstance(ipv4_address, ipaddress.IPv4Address): raise ValueError( f"{address!r} does not appear to be an IPv4 address" ) self._ipv4 = ipv4_address @property def ipv6(self) -> Optional[ipaddress.IPv6Address]: return self._ipv6 @ipv6.setter def ipv6(self, address: IpInput) -> None: if address is None: self._ipv6 = None else: ipv6_address = ipaddress.ip_address(address) if not isinstance(ipv6_address, ipaddress.IPv6Address): raise ValueError( f"{address!r} does not appear to be an IPv6 address" ) self._ipv6 = ipv6_address py.typed000066400000000000000000000000001511654350100371510ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry/exporter/zipkinpython-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/test-requirements.txt000066400000000000000000000006621511654350100331510ustar00rootroot00000000000000asgiref==3.7.2 certifi==2024.7.4 charset-normalizer==3.3.2 idna==3.7 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 py-cpuinfo==9.0.0 pytest==7.4.4 requests==2.32.3 tomli==2.0.1 typing_extensions==4.10.0 urllib3==2.2.2 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e opentelemetry-sdk -e opentelemetry-semantic-conventions -e tests/opentelemetry-test-utils -e exporter/opentelemetry-exporter-zipkin-json python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/tests/000077500000000000000000000000001511654350100300465ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/tests/__init__.py000066400000000000000000000011101511654350100321500ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
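# A short NodeEndpoint usage sketch (the addresses are the same documentation
# values used elsewhere in this package):
#
#     from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint
#
#     node = NodeEndpoint(ipv4="192.168.0.1", ipv6="2001:db8::c001", port=31313)
#     str(node.ipv4)  # "192.168.0.1"
#     str(node.ipv6)  # "2001:db8::c001"
#
#     NodeEndpoint(ipv4="2001:db8::c001")  # raises ValueError: the ipv4
#                                          # setter rejects an IPv6 literal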
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/tests/encoder/000077500000000000000000000000001511654350100314655ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/tests/encoder/__init__.py000066400000000000000000000000001511654350100335640ustar00rootroot00000000000000common_tests.py000066400000000000000000000444221511654350100345000ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/tests/encoder# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import unittest from typing import Dict, List from opentelemetry import trace as trace_api from opentelemetry.exporter.zipkin.encoder import ( DEFAULT_MAX_TAG_VALUE_LENGTH, Encoder, ) from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint from opentelemetry.sdk import trace from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.trace import TraceFlags from opentelemetry.trace.status import Status, StatusCode TEST_SERVICE_NAME = "test_service" # pylint: disable=protected-access class CommonEncoderTestCases: class CommonEncoderTest(unittest.TestCase): @staticmethod @abc.abstractmethod def get_encoder(*args, **kwargs) -> Encoder: pass @classmethod def get_encoder_default(cls) -> Encoder: return cls.get_encoder() @abc.abstractmethod def test_encode_trace_id(self): pass @abc.abstractmethod def test_encode_span_id(self): pass @abc.abstractmethod def test_encode_local_endpoint_default(self): pass @abc.abstractmethod def test_encode_local_endpoint_explicits(self): pass @abc.abstractmethod def _test_encode_max_tag_length(self, max_tag_value_length: int): pass def test_encode_max_tag_length_2(self): self._test_encode_max_tag_length(2) def test_encode_max_tag_length_5(self): self._test_encode_max_tag_length(5) def test_encode_max_tag_length_9(self): self._test_encode_max_tag_length(9) def test_encode_max_tag_length_10(self): self._test_encode_max_tag_length(10) def test_encode_max_tag_length_11(self): self._test_encode_max_tag_length(11) def test_encode_max_tag_length_128(self): self._test_encode_max_tag_length(128) def test_constructor_default(self): encoder = self.get_encoder() self.assertEqual( DEFAULT_MAX_TAG_VALUE_LENGTH, encoder.max_tag_value_length ) def test_constructor_max_tag_value_length(self): max_tag_value_length = 123456 encoder = self.get_encoder(max_tag_value_length) self.assertEqual( max_tag_value_length, encoder.max_tag_value_length ) def test_nsec_to_usec_round(self): base_time_nsec = 683647322 * 10**9 for nsec in ( base_time_nsec, base_time_nsec + 150 * 10**6, base_time_nsec + 300 * 10**6, base_time_nsec + 400 * 10**6, ): self.assertEqual( (nsec + 500) // 10**3, self.get_encoder_default()._nsec_to_usec_round(nsec), ) def test_encode_debug(self): self.assertFalse( self.get_encoder_default()._encode_debug( trace_api.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=0x00000000DEADBEF0, is_remote=False, 
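# DEFAULT trace flags leave the sampled bit unset, so _encode_debug is expected to return False for this context.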
trace_flags=TraceFlags(TraceFlags.DEFAULT), ) ) ) self.assertTrue( self.get_encoder_default()._encode_debug( trace_api.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=0x00000000DEADBEF0, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), ) ) ) def test_get_parent_id_from_span(self): parent_id = 0x00000000DEADBEF0 self.assertEqual( parent_id, self.get_encoder_default()._get_parent_id( trace._Span( name="test-span", context=trace_api.SpanContext( 0x000000000000000000000000DEADBEEF, 0x04BF92DEEFC58C92, is_remote=False, ), parent=trace_api.SpanContext( 0x0000000000000000000000AADEADBEEF, parent_id, is_remote=False, ), ) ), ) def test_get_parent_id_from_span_context(self): parent_id = 0x00000000DEADBEF0 self.assertEqual( parent_id, self.get_encoder_default()._get_parent_id( trace_api.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=parent_id, is_remote=False, ), ), ) @staticmethod def get_data_for_max_tag_length_test( max_tag_length: int, ) -> (trace._Span, Dict): start_time = 683647322 * 10**9 # in ns duration = 50 * 10**6 end_time = start_time + duration span = trace._Span( name=TEST_SERVICE_NAME, context=trace_api.SpanContext( 0x0E0C63257DE34C926F9EFCD03927272E, 0x04BF92DEEFC58C92, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), ), resource=trace.Resource({}), ) span.start(start_time=start_time) span.set_attribute("string1", "v" * 500) span.set_attribute("string2", "v" * 50) span.set_attribute("list1", ["a"] * 25) span.set_attribute("list2", ["a"] * 10) span.set_attribute("list3", [2] * 25) span.set_attribute("list4", [2] * 10) span.set_attribute("list5", [True] * 25) span.set_attribute("list6", [True] * 10) span.set_attribute("tuple1", ("a",) * 25) span.set_attribute("tuple2", ("a",) * 10) span.set_attribute("tuple3", (2,) * 25) span.set_attribute("tuple4", (2,) * 10) span.set_attribute("tuple5", (True,) * 25) span.set_attribute("tuple6", (True,) * 10) span.set_attribute("range1", range(0, 25)) span.set_attribute("range2", range(0, 10)) span.set_attribute("empty_list", []) span.set_attribute("none_list", ["hello", None, "world"]) span.end(end_time=end_time) expected_outputs = { 2: { "string1": "vv", "string2": "vv", "list1": "[]", "list2": "[]", "list3": "[]", "list4": "[]", "list5": "[]", "list6": "[]", "tuple1": "[]", "tuple2": "[]", "tuple3": "[]", "tuple4": "[]", "tuple5": "[]", "tuple6": "[]", "range1": "[]", "range2": "[]", "empty_list": "[]", "none_list": "[]", }, 5: { "string1": "vvvvv", "string2": "vvvvv", "list1": '["a"]', "list2": '["a"]', "list3": '["2"]', "list4": '["2"]', "list5": "[]", "list6": "[]", "tuple1": '["a"]', "tuple2": '["a"]', "tuple3": '["2"]', "tuple4": '["2"]', "tuple5": "[]", "tuple6": "[]", "range1": '["0"]', "range2": '["0"]', "empty_list": "[]", "none_list": "[]", }, 9: { "string1": "vvvvvvvvv", "string2": "vvvvvvvvv", "list1": '["a","a"]', "list2": '["a","a"]', "list3": '["2","2"]', "list4": '["2","2"]', "list5": '["true"]', "list6": '["true"]', "tuple1": '["a","a"]', "tuple2": '["a","a"]', "tuple3": '["2","2"]', "tuple4": '["2","2"]', "tuple5": '["true"]', "tuple6": '["true"]', "range1": '["0","1"]', "range2": '["0","1"]', "empty_list": "[]", "none_list": '["hello"]', }, 10: { "string1": "vvvvvvvvvv", "string2": "vvvvvvvvvv", "list1": '["a","a"]', "list2": '["a","a"]', "list3": '["2","2"]', "list4": '["2","2"]', "list5": '["true"]', "list6": '["true"]', "tuple1": '["a","a"]', "tuple2": '["a","a"]', "tuple3": '["2","2"]', "tuple4": '["2","2"]', "tuple5": '["true"]', "tuple6": '["true"]', "range1": 
'["0","1"]', "range2": '["0","1"]', "empty_list": "[]", "none_list": '["hello"]', }, 11: { "string1": "vvvvvvvvvvv", "string2": "vvvvvvvvvvv", "list1": '["a","a"]', "list2": '["a","a"]', "list3": '["2","2"]', "list4": '["2","2"]', "list5": '["true"]', "list6": '["true"]', "tuple1": '["a","a"]', "tuple2": '["a","a"]', "tuple3": '["2","2"]', "tuple4": '["2","2"]', "tuple5": '["true"]', "tuple6": '["true"]', "range1": '["0","1"]', "range2": '["0","1"]', "empty_list": "[]", "none_list": '["hello"]', }, 128: { "string1": "v" * 128, "string2": "v" * 50, "list1": '["a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a"]', "list2": '["a","a","a","a","a","a","a","a","a","a"]', "list3": '["2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2"]', "list4": '["2","2","2","2","2","2","2","2","2","2"]', "list5": '["true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"]', "list6": '["true","true","true","true","true","true","true","true","true","true"]', "tuple1": '["a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a"]', "tuple2": '["a","a","a","a","a","a","a","a","a","a"]', "tuple3": '["2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2"]', "tuple4": '["2","2","2","2","2","2","2","2","2","2"]', "tuple5": '["true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"]', "tuple6": '["true","true","true","true","true","true","true","true","true","true"]', "range1": '["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24"]', "range2": '["0","1","2","3","4","5","6","7","8","9"]', "empty_list": "[]", "none_list": '["hello",null,"world"]', }, } return span, expected_outputs[max_tag_length] @staticmethod def get_exhaustive_otel_span_list() -> List[trace._Span]: trace_id = 0x6E0C63257DE34C926F9EFCD03927272E base_time = 683647322 * 10**9 # in ns start_times = ( base_time, base_time + 150 * 10**6, base_time + 300 * 10**6, base_time + 400 * 10**6, ) end_times = ( start_times[0] + (50 * 10**6), start_times[1] + (100 * 10**6), start_times[2] + (200 * 10**6), start_times[3] + (300 * 10**6), ) parent_span_context = trace_api.SpanContext( trace_id, 0x1111111111111111, is_remote=False ) other_context = trace_api.SpanContext( trace_id, 0x2222222222222222, is_remote=False ) span1 = trace._Span( name="test-span-1", context=trace_api.SpanContext( trace_id, 0x34BF92DEEFC58C92, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), ), parent=parent_span_context, events=( trace.Event( name="event0", timestamp=base_time + 50 * 10**6, attributes={ "annotation_bool": True, "annotation_string": "annotation_test", "key_float": 0.3, }, ), ), links=( trace_api.Link( context=other_context, attributes={"key_bool": True} ), ), resource=trace.Resource({}), ) span1.start(start_time=start_times[0]) span1.set_attribute("key_bool", False) span1.set_attribute("key_string", "hello_world") span1.set_attribute("key_float", 111.22) span1.set_status(Status(StatusCode.OK)) span1.end(end_time=end_times[0]) span2 = trace._Span( name="test-span-2", context=parent_span_context, parent=None, resource=trace.Resource( attributes={"key_resource": "some_resource"} ), ) span2.start(start_time=start_times[1]) span2.set_status(Status(StatusCode.ERROR, "Example description")) 
span2.end(end_time=end_times[1]) span3 = trace._Span( name="test-span-3", context=other_context, parent=None, resource=trace.Resource( attributes={"key_resource": "some_resource"} ), ) span3.start(start_time=start_times[2]) span3.set_attribute("key_string", "hello_world") span3.end(end_time=end_times[2]) span4 = trace._Span( name="test-span-3", context=other_context, parent=None, resource=trace.Resource({}), instrumentation_scope=InstrumentationScope( name="name", version="version" ), ) span4.start(start_time=start_times[3]) span4.end(end_time=end_times[3]) return [span1, span2, span3, span4] # pylint: disable=W0223 class CommonJsonEncoderTest(CommonEncoderTest, abc.ABC): def test_encode_trace_id(self): for trace_id in (1, 1024, 2**32, 2**64, 2**65): self.assertEqual( format(trace_id, "032x"), self.get_encoder_default()._encode_trace_id(trace_id), ) def test_encode_span_id(self): for span_id in (1, 1024, 2**8, 2**16, 2**32, 2**64): self.assertEqual( format(span_id, "016x"), self.get_encoder_default()._encode_span_id(span_id), ) def test_encode_local_endpoint_default(self): self.assertEqual( self.get_encoder_default()._encode_local_endpoint( NodeEndpoint() ), {"serviceName": TEST_SERVICE_NAME}, ) def test_encode_local_endpoint_explicits(self): ipv4 = "192.168.0.1" ipv6 = "2001:db8::c001" port = 414120 self.assertEqual( self.get_encoder_default()._encode_local_endpoint( NodeEndpoint(ipv4, ipv6, port) ), { "serviceName": TEST_SERVICE_NAME, "ipv4": ipv4, "ipv6": ipv6, "port": port, }, ) @staticmethod def pop_and_sort(source_list, source_index, sort_key): """ Convenience method that will pop a specified index from a list, sort it by a given key and then return it. """ popped_item = source_list.pop(source_index, None) if popped_item is not None: popped_item = sorted(popped_item, key=lambda x: x[sort_key]) return popped_item def assert_equal_encoded_spans(self, expected_spans, actual_spans): self.assertEqual(expected_spans, actual_spans) test_v1_json.py000066400000000000000000000243751511654350100344110ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/tests/encoder# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
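# Timestamps in the expected payloads below are integer microseconds; the
# encoder converts from nanoseconds via Encoder._nsec_to_usec_round, which
# rounds half-up at the nanosecond -> microsecond boundary:
#
#     (1_499 + 500) // 10**3 == 1  # 1,499 ns -> 1 us
#     (1_500 + 500) // 10**3 == 2  # 1,500 ns -> 2 us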
import json from opentelemetry import trace as trace_api from opentelemetry.exporter.zipkin.encoder import ( _SCOPE_NAME_KEY, _SCOPE_VERSION_KEY, NAME_KEY, VERSION_KEY, ) from opentelemetry.exporter.zipkin.json.v1 import JsonV1Encoder from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint from opentelemetry.sdk import trace from opentelemetry.test.spantestutil import ( get_span_with_dropped_attributes_events_links, ) from opentelemetry.trace import TraceFlags, format_span_id, format_trace_id from .common_tests import ( # pylint: disable=import-error TEST_SERVICE_NAME, CommonEncoderTestCases, ) # pylint: disable=protected-access class TestV1JsonEncoder(CommonEncoderTestCases.CommonJsonEncoderTest): @staticmethod def get_encoder(*args, **kwargs) -> JsonV1Encoder: return JsonV1Encoder(*args, **kwargs) def test_encode(self): local_endpoint = {"serviceName": TEST_SERVICE_NAME} otel_spans = self.get_exhaustive_otel_span_list() trace_id = JsonV1Encoder._encode_trace_id( otel_spans[0].context.trace_id ) expected_output = [ { "traceId": trace_id, "id": JsonV1Encoder._encode_span_id( otel_spans[0].context.span_id ), "name": otel_spans[0].name, "timestamp": otel_spans[0].start_time // 10**3, "duration": (otel_spans[0].end_time // 10**3) - (otel_spans[0].start_time // 10**3), "annotations": [ { "timestamp": otel_spans[0].events[0].timestamp // 10**3, "value": json.dumps( { "event0": { "annotation_bool": True, "annotation_string": "annotation_test", "key_float": 0.3, } }, sort_keys=True, ), "endpoint": local_endpoint, } ], "binaryAnnotations": [ { "key": "key_bool", "value": "false", "endpoint": local_endpoint, }, { "key": "key_string", "value": "hello_world", "endpoint": local_endpoint, }, { "key": "key_float", "value": "111.22", "endpoint": local_endpoint, }, { "key": "otel.status_code", "value": "OK", "endpoint": local_endpoint, }, ], "debug": True, "parentId": JsonV1Encoder._encode_span_id( otel_spans[0].parent.span_id ), }, { "traceId": trace_id, "id": JsonV1Encoder._encode_span_id( otel_spans[1].context.span_id ), "name": otel_spans[1].name, "timestamp": otel_spans[1].start_time // 10**3, "duration": (otel_spans[1].end_time // 10**3) - (otel_spans[1].start_time // 10**3), "binaryAnnotations": [ { "key": "key_resource", "value": "some_resource", "endpoint": local_endpoint, }, { "key": "otel.status_code", "value": "ERROR", "endpoint": local_endpoint, }, { "key": "error", "value": "Example description", "endpoint": local_endpoint, }, ], }, { "traceId": trace_id, "id": JsonV1Encoder._encode_span_id( otel_spans[2].context.span_id ), "name": otel_spans[2].name, "timestamp": otel_spans[2].start_time // 10**3, "duration": (otel_spans[2].end_time // 10**3) - (otel_spans[2].start_time // 10**3), "binaryAnnotations": [ { "key": "key_string", "value": "hello_world", "endpoint": local_endpoint, }, { "key": "key_resource", "value": "some_resource", "endpoint": local_endpoint, }, ], }, { "traceId": trace_id, "id": JsonV1Encoder._encode_span_id( otel_spans[3].context.span_id ), "name": otel_spans[3].name, "timestamp": otel_spans[3].start_time // 10**3, "duration": (otel_spans[3].end_time // 10**3) - (otel_spans[3].start_time // 10**3), "binaryAnnotations": [ { "key": NAME_KEY, "value": "name", "endpoint": local_endpoint, }, { "key": VERSION_KEY, "value": "version", "endpoint": local_endpoint, }, { "key": _SCOPE_NAME_KEY, "value": "name", "endpoint": local_endpoint, }, { "key": _SCOPE_VERSION_KEY, "value": "version", "endpoint": local_endpoint, }, ], }, ] self.assert_equal_encoded_spans( 
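# serialize() preserves the input span order, so comparing against json.dumps(expected_output) byte-for-byte is deterministic.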
json.dumps(expected_output), JsonV1Encoder().serialize(otel_spans, NodeEndpoint()), ) def test_encode_id_zero_padding(self): trace_id = 0x0E0C63257DE34C926F9EFCD03927272E span_id = 0x04BF92DEEFC58C92 parent_id = 0x0AAAAAAAAAAAAAAA start_time = 683647322 * 10**9 # in ns duration = 50 * 10**6 end_time = start_time + duration otel_span = trace._Span( name=TEST_SERVICE_NAME, context=trace_api.SpanContext( trace_id, span_id, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), ), parent=trace_api.SpanContext(trace_id, parent_id, is_remote=False), resource=trace.Resource({}), ) otel_span.start(start_time=start_time) otel_span.end(end_time=end_time) expected_output = [ { "traceId": format_trace_id(trace_id), "id": format_span_id(span_id), "name": TEST_SERVICE_NAME, "timestamp": JsonV1Encoder._nsec_to_usec_round(start_time), "duration": JsonV1Encoder._nsec_to_usec_round(duration), "debug": True, "parentId": format_span_id(parent_id), } ] self.assertEqual( json.dumps(expected_output), JsonV1Encoder().serialize([otel_span], NodeEndpoint()), ) def _test_encode_max_tag_length(self, max_tag_value_length: int): otel_span, expected_tag_output = self.get_data_for_max_tag_length_test( max_tag_value_length ) service_name = otel_span.name binary_annotations = [] for tag_key, tag_expected_value in expected_tag_output.items(): binary_annotations.append( { "key": tag_key, "value": tag_expected_value, "endpoint": {"serviceName": service_name}, } ) expected_output = [ { "traceId": JsonV1Encoder._encode_trace_id( otel_span.context.trace_id ), "id": JsonV1Encoder._encode_span_id(otel_span.context.span_id), "name": service_name, "timestamp": JsonV1Encoder._nsec_to_usec_round( otel_span.start_time ), "duration": JsonV1Encoder._nsec_to_usec_round( otel_span.end_time - otel_span.start_time ), "binaryAnnotations": binary_annotations, "debug": True, } ] self.assert_equal_encoded_spans( json.dumps(expected_output), JsonV1Encoder(max_tag_value_length).serialize( [otel_span], NodeEndpoint() ), ) def test_dropped_span_attributes(self): otel_span = get_span_with_dropped_attributes_events_links() annotations = JsonV1Encoder()._encode_span(otel_span, "test")[ "binaryAnnotations" ] annotations = { annotation["key"]: annotation["value"] for annotation in annotations } self.assertEqual("1", annotations["otel.dropped_links_count"]) self.assertEqual("2", annotations["otel.dropped_attributes_count"]) self.assertEqual("3", annotations["otel.dropped_events_count"]) test_v2_json.py000066400000000000000000000207541511654350100344070ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/tests/encoder# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
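# ID-encoding note for the assertions below: format_trace_id and
# format_span_id zero-pad to fixed-width lowercase hex (32 and 16 characters
# respectively), e.g.:
#
#     format(0x0E0C63257DE34C926F9EFCD03927272E, "032x")
#     # -> "0e0c63257de34c926f9efcd03927272e"
#     format(0x04BF92DEEFC58C92, "016x")
#     # -> "04bf92deefc58c92"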
import json from opentelemetry import trace as trace_api from opentelemetry.exporter.zipkin.encoder import ( _SCOPE_NAME_KEY, _SCOPE_VERSION_KEY, NAME_KEY, VERSION_KEY, ) from opentelemetry.exporter.zipkin.json.v2 import JsonV2Encoder from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint from opentelemetry.sdk import trace from opentelemetry.test.spantestutil import ( get_span_with_dropped_attributes_events_links, ) from opentelemetry.trace import SpanKind, TraceFlags from .common_tests import ( # pylint: disable=import-error TEST_SERVICE_NAME, CommonEncoderTestCases, ) # pylint: disable=protected-access class TestV2JsonEncoder(CommonEncoderTestCases.CommonJsonEncoderTest): @staticmethod def get_encoder(*args, **kwargs) -> JsonV2Encoder: return JsonV2Encoder(*args, **kwargs) def test_encode(self): local_endpoint = {"serviceName": TEST_SERVICE_NAME} span_kind = JsonV2Encoder.SPAN_KIND_MAP[SpanKind.INTERNAL] otel_spans = self.get_exhaustive_otel_span_list() trace_id = JsonV2Encoder._encode_trace_id( otel_spans[0].context.trace_id ) expected_output = [ { "traceId": trace_id, "id": JsonV2Encoder._encode_span_id( otel_spans[0].context.span_id ), "name": otel_spans[0].name, "timestamp": otel_spans[0].start_time // 10**3, "duration": (otel_spans[0].end_time // 10**3) - (otel_spans[0].start_time // 10**3), "localEndpoint": local_endpoint, "kind": span_kind, "tags": { "key_bool": "false", "key_string": "hello_world", "key_float": "111.22", "otel.status_code": "OK", }, "annotations": [ { "timestamp": otel_spans[0].events[0].timestamp // 10**3, "value": json.dumps( { "event0": { "annotation_bool": True, "annotation_string": "annotation_test", "key_float": 0.3, } }, sort_keys=True, ), } ], "debug": True, "parentId": JsonV2Encoder._encode_span_id( otel_spans[0].parent.span_id ), }, { "traceId": trace_id, "id": JsonV2Encoder._encode_span_id( otel_spans[1].context.span_id ), "name": otel_spans[1].name, "timestamp": otel_spans[1].start_time // 10**3, "duration": (otel_spans[1].end_time // 10**3) - (otel_spans[1].start_time // 10**3), "localEndpoint": local_endpoint, "kind": span_kind, "tags": { "key_resource": "some_resource", "otel.status_code": "ERROR", "error": "Example description", }, }, { "traceId": trace_id, "id": JsonV2Encoder._encode_span_id( otel_spans[2].context.span_id ), "name": otel_spans[2].name, "timestamp": otel_spans[2].start_time // 10**3, "duration": (otel_spans[2].end_time // 10**3) - (otel_spans[2].start_time // 10**3), "localEndpoint": local_endpoint, "kind": span_kind, "tags": { "key_string": "hello_world", "key_resource": "some_resource", }, }, { "traceId": trace_id, "id": JsonV2Encoder._encode_span_id( otel_spans[3].context.span_id ), "name": otel_spans[3].name, "timestamp": otel_spans[3].start_time // 10**3, "duration": (otel_spans[3].end_time // 10**3) - (otel_spans[3].start_time // 10**3), "localEndpoint": local_endpoint, "kind": span_kind, "tags": { NAME_KEY: "name", VERSION_KEY: "version", _SCOPE_NAME_KEY: "name", _SCOPE_VERSION_KEY: "version", }, }, ] self.assert_equal_encoded_spans( json.dumps(expected_output), JsonV2Encoder().serialize(otel_spans, NodeEndpoint()), ) def test_encode_id_zero_padding(self): trace_id = 0x0E0C63257DE34C926F9EFCD03927272E span_id = 0x04BF92DEEFC58C92 parent_id = 0x0AAAAAAAAAAAAAAA start_time = 683647322 * 10**9 # in ns duration = 50 * 10**6 end_time = start_time + duration otel_span = trace._Span( name=TEST_SERVICE_NAME, context=trace_api.SpanContext( trace_id, span_id, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), 
), parent=trace_api.SpanContext(trace_id, parent_id, is_remote=False), resource=trace.Resource({}), ) otel_span.start(start_time=start_time) otel_span.end(end_time=end_time) expected_output = [ { "traceId": format(trace_id, "032x"), "id": format(span_id, "016x"), "name": TEST_SERVICE_NAME, "timestamp": JsonV2Encoder._nsec_to_usec_round(start_time), "duration": JsonV2Encoder._nsec_to_usec_round(duration), "localEndpoint": {"serviceName": TEST_SERVICE_NAME}, "kind": JsonV2Encoder.SPAN_KIND_MAP[SpanKind.INTERNAL], "debug": True, "parentId": format(parent_id, "016x"), } ] self.assert_equal_encoded_spans( json.dumps(expected_output), JsonV2Encoder().serialize([otel_span], NodeEndpoint()), ) def _test_encode_max_tag_length(self, max_tag_value_length: int): otel_span, expected_tag_output = self.get_data_for_max_tag_length_test( max_tag_value_length ) service_name = otel_span.name expected_output = [ { "traceId": JsonV2Encoder._encode_trace_id( otel_span.context.trace_id ), "id": JsonV2Encoder._encode_span_id(otel_span.context.span_id), "name": service_name, "timestamp": JsonV2Encoder._nsec_to_usec_round( otel_span.start_time ), "duration": JsonV2Encoder._nsec_to_usec_round( otel_span.end_time - otel_span.start_time ), "localEndpoint": {"serviceName": service_name}, "kind": JsonV2Encoder.SPAN_KIND_MAP[SpanKind.INTERNAL], "tags": expected_tag_output, "debug": True, } ] self.assert_equal_encoded_spans( json.dumps(expected_output), JsonV2Encoder(max_tag_value_length).serialize( [otel_span], NodeEndpoint() ), ) def test_dropped_span_attributes(self): otel_span = get_span_with_dropped_attributes_events_links() tags = JsonV2Encoder()._encode_span(otel_span, "test")["tags"] self.assertEqual("1", tags["otel.dropped_links_count"]) self.assertEqual("2", tags["otel.dropped_attributes_count"]) self.assertEqual("3", tags["otel.dropped_events_count"]) test_zipkin_exporter.py000066400000000000000000000213351511654350100346400ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-json/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
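# The tests below stub the transport by patching requests.Session.post -- the
# single HTTP call ZipkinExporter.export makes. Only 200 and 202 responses
# (REQUESTS_SUCCESS_STATUS_CODES) count as success; anything else maps to
# SpanExportResult.FAILURE. A minimal standalone sketch of the same pattern:
#
#     from unittest.mock import patch
#     from opentelemetry.exporter.zipkin.json import ZipkinExporter
#     from opentelemetry.sdk.trace.export import SpanExportResult
#
#     with patch("requests.Session.post") as mock_post:
#         mock_post.return_value.status_code = 202
#         assert ZipkinExporter().export([]) is SpanExportResult.SUCCESS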
import ipaddress import os import unittest from unittest.mock import patch import requests from opentelemetry import trace from opentelemetry.exporter.zipkin.encoder import Protocol from opentelemetry.exporter.zipkin.json import DEFAULT_ENDPOINT, ZipkinExporter from opentelemetry.exporter.zipkin.json.v2 import JsonV2Encoder from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_ZIPKIN_ENDPOINT, OTEL_EXPORTER_ZIPKIN_TIMEOUT, ) from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.sdk.trace import TracerProvider, _Span from opentelemetry.sdk.trace.export import SpanExportResult TEST_SERVICE_NAME = "test_service" class MockResponse: def __init__(self, status_code): self.status_code = status_code self.text = status_code class TestZipkinExporter(unittest.TestCase): @classmethod def setUpClass(cls): trace.set_tracer_provider( TracerProvider( resource=Resource({SERVICE_NAME: TEST_SERVICE_NAME}) ) ) def tearDown(self): os.environ.pop(OTEL_EXPORTER_ZIPKIN_ENDPOINT, None) os.environ.pop(OTEL_EXPORTER_ZIPKIN_TIMEOUT, None) def test_constructor_default(self): exporter = ZipkinExporter() self.assertIsInstance(exporter.encoder, JsonV2Encoder) self.assertIsInstance(exporter.session, requests.Session) self.assertEqual(exporter.endpoint, DEFAULT_ENDPOINT) self.assertEqual(exporter.local_node.service_name, TEST_SERVICE_NAME) self.assertEqual(exporter.local_node.ipv4, None) self.assertEqual(exporter.local_node.ipv6, None) self.assertEqual(exporter.local_node.port, None) def test_constructor_env_vars(self): os_endpoint = "https://foo:9911/path" os.environ[OTEL_EXPORTER_ZIPKIN_ENDPOINT] = os_endpoint os.environ[OTEL_EXPORTER_ZIPKIN_TIMEOUT] = "15" exporter = ZipkinExporter() self.assertEqual(exporter.endpoint, os_endpoint) self.assertEqual(exporter.local_node.service_name, TEST_SERVICE_NAME) self.assertEqual(exporter.local_node.ipv4, None) self.assertEqual(exporter.local_node.ipv6, None) self.assertEqual(exporter.local_node.port, None) self.assertEqual(exporter.timeout, 15) def test_constructor_protocol_endpoint(self): """Test the constructor for the common usage of providing the protocol and endpoint arguments.""" endpoint = "https://opentelemetry.io:15875/myapi/traces?format=zipkin" exporter = ZipkinExporter(endpoint=endpoint) self.assertIsInstance(exporter.encoder, JsonV2Encoder) self.assertIsInstance(exporter.session, requests.Session) self.assertEqual(exporter.endpoint, endpoint) self.assertEqual(exporter.local_node.service_name, TEST_SERVICE_NAME) self.assertEqual(exporter.local_node.ipv4, None) self.assertEqual(exporter.local_node.ipv6, None) self.assertEqual(exporter.local_node.port, None) def test_constructor_all_params_and_env_vars(self): """Test the scenario where all params are provided and all OS env vars are set. Explicit params should take precedence. 
""" os_endpoint = "https://os.env.param:9911/path" os.environ[OTEL_EXPORTER_ZIPKIN_ENDPOINT] = os_endpoint os.environ[OTEL_EXPORTER_ZIPKIN_TIMEOUT] = "15" constructor_param_version = Protocol.V2 constructor_param_endpoint = "https://constructor.param:9911/path" local_node_ipv4 = "192.168.0.1" local_node_ipv6 = "2001:db8::1000" local_node_port = 30301 max_tag_value_length = 56 timeout_param = 20 session_param = requests.Session() exporter = ZipkinExporter( constructor_param_version, constructor_param_endpoint, local_node_ipv4, local_node_ipv6, local_node_port, max_tag_value_length, timeout_param, session_param, ) self.assertIsInstance(exporter.encoder, JsonV2Encoder) self.assertIsInstance(exporter.session, requests.Session) self.assertEqual(exporter.endpoint, constructor_param_endpoint) self.assertEqual(exporter.local_node.service_name, TEST_SERVICE_NAME) self.assertEqual( exporter.local_node.ipv4, ipaddress.IPv4Address(local_node_ipv4) ) self.assertEqual( exporter.local_node.ipv6, ipaddress.IPv6Address(local_node_ipv6) ) self.assertEqual(exporter.local_node.port, local_node_port) # Assert timeout passed in constructor is prioritized over env # when both are set. self.assertEqual(exporter.timeout, 20) @patch("requests.Session.post") def test_export_success(self, mock_post): mock_post.return_value = MockResponse(200) spans = [] exporter = ZipkinExporter() status = exporter.export(spans) self.assertEqual(SpanExportResult.SUCCESS, status) @patch("requests.Session.post") def test_export_invalid_response(self, mock_post): mock_post.return_value = MockResponse(404) spans = [] exporter = ZipkinExporter() status = exporter.export(spans) self.assertEqual(SpanExportResult.FAILURE, status) @patch("requests.Session.post") def test_export_span_service_name(self, mock_post): mock_post.return_value = MockResponse(200) resource = Resource.create({SERVICE_NAME: "test"}) context = trace.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=0x00000000DEADBEF0, is_remote=False, ) span = _Span("test_span", context=context, resource=resource) span.start() span.end() exporter = ZipkinExporter() exporter.export([span]) self.assertEqual(exporter.local_node.service_name, "test") @patch("requests.Session.post") def test_export_shutdown(self, mock_post): mock_post.return_value = MockResponse(200) spans = [] exporter = ZipkinExporter() status = exporter.export(spans) self.assertEqual(SpanExportResult.SUCCESS, status) exporter.shutdown() # Any call to .export() post shutdown should return failure status = exporter.export(spans) self.assertEqual(SpanExportResult.FAILURE, status) @patch("requests.Session.post") def test_export_timeout(self, mock_post): mock_post.return_value = MockResponse(200) spans = [] exporter = ZipkinExporter(timeout=2) status = exporter.export(spans) self.assertEqual(SpanExportResult.SUCCESS, status) mock_post.assert_called_with( url="http://localhost:9411/api/v2/spans", data="[]", timeout=2 ) class TestZipkinNodeEndpoint(unittest.TestCase): def test_constructor_default(self): node_endpoint = NodeEndpoint() self.assertEqual(node_endpoint.ipv4, None) self.assertEqual(node_endpoint.ipv6, None) self.assertEqual(node_endpoint.port, None) self.assertEqual(node_endpoint.service_name, TEST_SERVICE_NAME) def test_constructor_explicits(self): ipv4 = "192.168.0.1" ipv6 = "2001:db8::c001" port = 414120 node_endpoint = NodeEndpoint(ipv4, ipv6, port) self.assertEqual(node_endpoint.ipv4, ipaddress.IPv4Address(ipv4)) self.assertEqual(node_endpoint.ipv6, ipaddress.IPv6Address(ipv6)) 
self.assertEqual(node_endpoint.port, port) self.assertEqual(node_endpoint.service_name, TEST_SERVICE_NAME) def test_ipv4_invalid_raises_error(self): with self.assertRaises(ValueError): NodeEndpoint(ipv4="invalid-ipv4-address") def test_ipv4_passed_ipv6_raises_error(self): with self.assertRaises(ValueError): NodeEndpoint(ipv4="2001:db8::c001") def test_ipv6_invalid_raises_error(self): with self.assertRaises(ValueError): NodeEndpoint(ipv6="invalid-ipv6-address") def test_ipv6_passed_ipv4_raises_error(self): with self.assertRaises(ValueError): NodeEndpoint(ipv6="192.168.0.1") python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/000077500000000000000000000000001511654350100300535ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/CHANGELOG.md000066400000000000000000000000001511654350100316520ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/LICENSE000066400000000000000000000261351511654350100310670ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/README.rst000066400000000000000000000012601511654350100315410ustar00rootroot00000000000000OpenTelemetry Zipkin Protobuf Exporter ====================================== |pypi| .. |pypi| image:: https://badge.fury.io/py/opentelemetry-exporter-zipkin-proto-http.svg :target: https://pypi.org/project/opentelemetry-exporter-zipkin-proto-http/ This library allows export of tracing data to `Zipkin `_ using Protobuf for serialization. Installation ------------ :: pip install opentelemetry-exporter-zipkin-proto-http References ---------- * `OpenTelemetry Zipkin Exporter `_ * `Zipkin `_ * `OpenTelemetry Project `_ python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/pyproject.toml000066400000000000000000000031561511654350100327740ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-exporter-zipkin-proto-http" dynamic = ["version"] description = "Zipkin Span Protobuf Exporter for OpenTelemetry" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Framework :: OpenTelemetry :: Exporters", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "opentelemetry-api ~= 1.3", "opentelemetry-exporter-zipkin-json == 1.39.1", "opentelemetry-sdk ~= 1.11", "protobuf ~= 3.12", "requests ~= 2.7", ] [project.entry-points.opentelemetry_traces_exporter] zipkin_proto = "opentelemetry.exporter.zipkin.proto.http:ZipkinExporter" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-zipkin-proto-http" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/exporter/zipkin/proto/http/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] 
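The pyproject.toml above registers the exporter under the ``opentelemetry_traces_exporter`` entry-point group as ``zipkin_proto``, which is how OpenTelemetry tooling discovers trace exporters by name. Below is a minimal sketch, assuming Python 3.10+ selectable entry points, of loading that entry point by hand; the ``importlib.metadata`` usage here is illustrative and not part of this package.

.. code:: python

    from importlib.metadata import entry_points

    # Select the trace-exporter entry points and keep the one this
    # package registers as "zipkin_proto" (other packages, e.g. the
    # JSON exporter, register other names in the same group).
    (ep,) = [
        ep
        for ep in entry_points(group="opentelemetry_traces_exporter")
        if ep.name == "zipkin_proto"
    ]

    # Resolves to opentelemetry.exporter.zipkin.proto.http:ZipkinExporter
    ZipkinExporter = ep.load()
    exporter = ZipkinExporter()  # defaults to http://localhost:9411/api/v2/spans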
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/000077500000000000000000000000001511654350100306425ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/000077500000000000000000000000001511654350100335365ustar00rootroot00000000000000exporter/000077500000000000000000000000001511654350100353275ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetryzipkin/000077500000000000000000000000001511654350100366335ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporterproto/000077500000000000000000000000001511654350100377765ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkinhttp/000077500000000000000000000000001511654350100407555ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto__init__.py000066400000000000000000000147761511654350100431050ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto/http# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenTelemetry Zipkin Protobuf Exporter -------------------------------------- This library allows exporting tracing data to `Zipkin`_. Usage ----- The **OpenTelemetry Zipkin Exporter** allows exporting of `OpenTelemetry`_ traces to `Zipkin`_. This exporter sends traces to the configured Zipkin collector endpoint using HTTP and supports v2 protobuf. .. _Zipkin: https://zipkin.io/ .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ .. _Specification: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#zipkin-exporter .. 
code:: python import requests from opentelemetry import trace from opentelemetry.exporter.zipkin.proto.http import ZipkinExporter from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor trace.set_tracer_provider(TracerProvider()) tracer = trace.get_tracer(__name__) # create a ZipkinExporter zipkin_exporter = ZipkinExporter( # optional: # endpoint="http://localhost:9411/api/v2/spans", # local_node_ipv4="192.168.0.1", # local_node_ipv6="2001:db8::c001", # local_node_port=31313, # max_tag_value_length=256, # timeout=5 (in seconds), # session=requests.Session() ) # Create a BatchSpanProcessor and add the exporter to it span_processor = BatchSpanProcessor(zipkin_exporter) # add to the tracer trace.get_tracer_provider().add_span_processor(span_processor) with tracer.start_as_current_span("foo"): print("Hello world!") The exporter supports the following environment variables for configuration: - :envvar:`OTEL_EXPORTER_ZIPKIN_ENDPOINT` - :envvar:`OTEL_EXPORTER_ZIPKIN_TIMEOUT` API --- """ import logging from os import environ from typing import Optional, Sequence import requests from opentelemetry.exporter.zipkin.node_endpoint import IpInput, NodeEndpoint from opentelemetry.exporter.zipkin.proto.http.v2 import ProtobufEncoder from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_ZIPKIN_ENDPOINT, OTEL_EXPORTER_ZIPKIN_TIMEOUT, ) from opentelemetry.sdk.resources import SERVICE_NAME from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult from opentelemetry.trace import Span DEFAULT_ENDPOINT = "http://localhost:9411/api/v2/spans" REQUESTS_SUCCESS_STATUS_CODES = (200, 202) logger = logging.getLogger(__name__) class ZipkinExporter(SpanExporter): def __init__( self, endpoint: Optional[str] = None, local_node_ipv4: IpInput = None, local_node_ipv6: IpInput = None, local_node_port: Optional[int] = None, max_tag_value_length: Optional[int] = None, timeout: Optional[int] = None, session: Optional[requests.Session] = None, ): """Zipkin exporter. Args: endpoint: The endpoint of the Zipkin collector. local_node_ipv4: Primary IPv4 address associated with this connection. local_node_ipv6: Primary IPv6 address associated with this connection. local_node_port: Depending on context, this could be a listen port or the client-side of a socket. max_tag_value_length: Max length string attribute values can have. timeout: Maximum time the Zipkin exporter will wait for each batch export. The default value is 10s. session: Connection session to the Zipkin collector endpoint. The tuple (local_node_ipv4, local_node_ipv6, local_node_port) is used to represent the network context of a node in the service graph. 
""" self.local_node = NodeEndpoint( local_node_ipv4, local_node_ipv6, local_node_port ) if endpoint is None: endpoint = ( environ.get(OTEL_EXPORTER_ZIPKIN_ENDPOINT) or DEFAULT_ENDPOINT ) self.endpoint = endpoint self.encoder = ProtobufEncoder(max_tag_value_length) self.session = session or requests.Session() self.session.headers.update( {"Content-Type": self.encoder.content_type()} ) self._closed = False self.timeout = timeout or int( environ.get(OTEL_EXPORTER_ZIPKIN_TIMEOUT, 10) ) def export(self, spans: Sequence[Span]) -> SpanExportResult: # After the call to Shutdown subsequent calls to Export are # not allowed and should return a Failure result if self._closed: logger.warning("Exporter already shutdown, ignoring batch") return SpanExportResult.FAILURE # Populate service_name from first span # We restrict any SpanProcessor to be only associated with a single # TracerProvider, so it is safe to assume that all Spans in a single # batch all originate from one TracerProvider (and in turn have all # the same service.name) if spans: service_name = spans[0].resource.attributes.get(SERVICE_NAME) if service_name: self.local_node.service_name = service_name result = self.session.post( url=self.endpoint, data=self.encoder.serialize(spans, self.local_node), timeout=self.timeout, ) if result.status_code not in REQUESTS_SUCCESS_STATUS_CODES: logger.error( "Traces cannot be uploaded; status code: %s, message %s", result.status_code, result.text, ) return SpanExportResult.FAILURE return SpanExportResult.SUCCESS def shutdown(self) -> None: if self._closed: logger.warning("Exporter already shutdown, ignoring call") return self.session.close() self._closed = True def force_flush(self, timeout_millis: int = 30000) -> bool: return True py.typed000066400000000000000000000000001511654350100424420ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto/httpv2/000077500000000000000000000000001511654350100413045ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto/http__init__.py000066400000000000000000000112141511654350100434140ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto/http/v2# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Zipkin Export Encoder for Protobuf API spec: https://github.com/openzipkin/zipkin-api/blob/master/zipkin.proto """ from typing import List, Optional, Sequence from opentelemetry.exporter.zipkin.encoder import Encoder from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint from opentelemetry.exporter.zipkin.proto.http.v2.gen import zipkin_pb2 from opentelemetry.sdk.trace import Event from opentelemetry.trace import Span, SpanKind class ProtobufEncoder(Encoder): """Zipkin Export Encoder for Protobuf API spec: https://github.com/openzipkin/zipkin-api/blob/master/zipkin.proto """ SPAN_KIND_MAP = { SpanKind.INTERNAL: zipkin_pb2.Span.Kind.SPAN_KIND_UNSPECIFIED, SpanKind.SERVER: zipkin_pb2.Span.Kind.SERVER, SpanKind.CLIENT: zipkin_pb2.Span.Kind.CLIENT, SpanKind.PRODUCER: zipkin_pb2.Span.Kind.PRODUCER, SpanKind.CONSUMER: zipkin_pb2.Span.Kind.CONSUMER, } @staticmethod def content_type(): return "application/x-protobuf" def serialize( self, spans: Sequence[Span], local_endpoint: NodeEndpoint ) -> bytes: encoded_local_endpoint = self._encode_local_endpoint(local_endpoint) # pylint: disable=no-member encoded_spans = zipkin_pb2.ListOfSpans() for span in spans: encoded_spans.spans.append( self._encode_span(span, encoded_local_endpoint) ) return encoded_spans.SerializeToString() def _encode_span( self, span: Span, encoded_local_endpoint: zipkin_pb2.Endpoint ) -> zipkin_pb2.Span: context = span.get_span_context() # pylint: disable=no-member encoded_span = zipkin_pb2.Span( trace_id=self._encode_trace_id(context.trace_id), id=self._encode_span_id(context.span_id), name=span.name, timestamp=self._nsec_to_usec_round(span.start_time), duration=self._nsec_to_usec_round(span.end_time - span.start_time), local_endpoint=encoded_local_endpoint, kind=self.SPAN_KIND_MAP[span.kind], ) tags = self._extract_tags_from_span(span) if tags: encoded_span.tags.update(tags) annotations = self._encode_annotations(span.events) if annotations: encoded_span.annotations.extend(annotations) debug = self._encode_debug(context) if debug: encoded_span.debug = debug parent_id = self._get_parent_id(span.parent) if parent_id is not None: encoded_span.parent_id = self._encode_span_id(parent_id) return encoded_span def _encode_annotations( self, span_events: Optional[List[Event]] ) -> Optional[List]: annotations = self._extract_annotations_from_events(span_events) if annotations is None: encoded_annotations = None else: encoded_annotations = [] for annotation in annotations: encoded_annotations.append( zipkin_pb2.Annotation( timestamp=annotation["timestamp"], value=annotation["value"], ) ) return encoded_annotations @staticmethod def _encode_local_endpoint( local_endpoint: NodeEndpoint, ) -> zipkin_pb2.Endpoint: encoded_local_endpoint = zipkin_pb2.Endpoint( service_name=local_endpoint.service_name, ) if local_endpoint.ipv4 is not None: encoded_local_endpoint.ipv4 = local_endpoint.ipv4.packed if local_endpoint.ipv6 is not None: encoded_local_endpoint.ipv6 = local_endpoint.ipv6.packed if local_endpoint.port is not None: encoded_local_endpoint.port = local_endpoint.port return encoded_local_endpoint @staticmethod def _encode_span_id(span_id: int) -> bytes: return span_id.to_bytes(length=8, byteorder="big", signed=False) @staticmethod def _encode_trace_id(trace_id: int) -> bytes: return trace_id.to_bytes(length=16, byteorder="big", signed=False) 
gen/000077500000000000000000000000001511654350100420555ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto/http/v2__init__.py000066400000000000000000000000001511654350100441540ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto/http/v2/genzipkin_pb2.py000066400000000000000000000435531511654350100445100ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto/http/v2/gen# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: zipkin.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='zipkin.proto', package='zipkin.proto3', syntax='proto3', serialized_options=b'\n\016zipkin2.proto3P\001', create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x0czipkin.proto\x12\rzipkin.proto3\"\xf5\x03\n\x04Span\x12\x10\n\x08trace_id\x18\x01 \x01(\x0c\x12\x11\n\tparent_id\x18\x02 \x01(\x0c\x12\n\n\x02id\x18\x03 \x01(\x0c\x12&\n\x04kind\x18\x04 \x01(\x0e\x32\x18.zipkin.proto3.Span.Kind\x12\x0c\n\x04name\x18\x05 \x01(\t\x12\x11\n\ttimestamp\x18\x06 \x01(\x06\x12\x10\n\x08\x64uration\x18\x07 \x01(\x04\x12/\n\x0elocal_endpoint\x18\x08 \x01(\x0b\x32\x17.zipkin.proto3.Endpoint\x12\x30\n\x0fremote_endpoint\x18\t \x01(\x0b\x32\x17.zipkin.proto3.Endpoint\x12.\n\x0b\x61nnotations\x18\n \x03(\x0b\x32\x19.zipkin.proto3.Annotation\x12+\n\x04tags\x18\x0b \x03(\x0b\x32\x1d.zipkin.proto3.Span.TagsEntry\x12\r\n\x05\x64\x65\x62ug\x18\x0c \x01(\x08\x12\x0e\n\x06shared\x18\r \x01(\x08\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"U\n\x04Kind\x12\x19\n\x15SPAN_KIND_UNSPECIFIED\x10\x00\x12\n\n\x06\x43LIENT\x10\x01\x12\n\n\x06SERVER\x10\x02\x12\x0c\n\x08PRODUCER\x10\x03\x12\x0c\n\x08\x43ONSUMER\x10\x04\"J\n\x08\x45ndpoint\x12\x14\n\x0cservice_name\x18\x01 \x01(\t\x12\x0c\n\x04ipv4\x18\x02 \x01(\x0c\x12\x0c\n\x04ipv6\x18\x03 \x01(\x0c\x12\x0c\n\x04port\x18\x04 \x01(\x05\".\n\nAnnotation\x12\x11\n\ttimestamp\x18\x01 \x01(\x06\x12\r\n\x05value\x18\x02 \x01(\t\"1\n\x0bListOfSpans\x12\"\n\x05spans\x18\x01 \x03(\x0b\x32\x13.zipkin.proto3.Span\"\x10\n\x0eReportResponse2T\n\x0bSpanService\x12\x45\n\x06Report\x12\x1a.zipkin.proto3.ListOfSpans\x1a\x1d.zipkin.proto3.ReportResponse\"\x00\x42\x12\n\x0ezipkin2.proto3P\x01\x62\x06proto3' ) _SPAN_KIND = _descriptor.EnumDescriptor( name='Kind', full_name='zipkin.proto3.Span.Kind', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name='SPAN_KIND_UNSPECIFIED', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CLIENT', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='SERVER', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='PRODUCER', index=3, number=3, 
serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( name='CONSUMER', index=4, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=448, serialized_end=533, ) _sym_db.RegisterEnumDescriptor(_SPAN_KIND) _SPAN_TAGSENTRY = _descriptor.Descriptor( name='TagsEntry', full_name='zipkin.proto3.Span.TagsEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='key', full_name='zipkin.proto3.Span.TagsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value', full_name='zipkin.proto3.Span.TagsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=b'8\001', is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=403, serialized_end=446, ) _SPAN = _descriptor.Descriptor( name='Span', full_name='zipkin.proto3.Span', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='trace_id', full_name='zipkin.proto3.Span.trace_id', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='parent_id', full_name='zipkin.proto3.Span.parent_id', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='id', full_name='zipkin.proto3.Span.id', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='kind', full_name='zipkin.proto3.Span.kind', index=3, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='name', full_name='zipkin.proto3.Span.name', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( name='timestamp', full_name='zipkin.proto3.Span.timestamp', index=5, number=6, type=6, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='duration', full_name='zipkin.proto3.Span.duration', index=6, number=7, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='local_endpoint', full_name='zipkin.proto3.Span.local_endpoint', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='remote_endpoint', full_name='zipkin.proto3.Span.remote_endpoint', index=8, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='annotations', full_name='zipkin.proto3.Span.annotations', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='tags', full_name='zipkin.proto3.Span.tags', index=10, number=11, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='debug', full_name='zipkin.proto3.Span.debug', index=11, number=12, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='shared', full_name='zipkin.proto3.Span.shared', index=12, number=13, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[_SPAN_TAGSENTRY, ], enum_types=[ _SPAN_KIND, ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=32, serialized_end=533, ) _ENDPOINT = _descriptor.Descriptor( name='Endpoint', full_name='zipkin.proto3.Endpoint', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='service_name', full_name='zipkin.proto3.Endpoint.service_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, 
default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ipv4', full_name='zipkin.proto3.Endpoint.ipv4', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='ipv6', full_name='zipkin.proto3.Endpoint.ipv6', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='port', full_name='zipkin.proto3.Endpoint.port', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=535, serialized_end=609, ) _ANNOTATION = _descriptor.Descriptor( name='Annotation', full_name='zipkin.proto3.Annotation', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='timestamp', full_name='zipkin.proto3.Annotation.timestamp', index=0, number=1, type=6, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='value', full_name='zipkin.proto3.Annotation.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=611, serialized_end=657, ) _LISTOFSPANS = _descriptor.Descriptor( name='ListOfSpans', full_name='zipkin.proto3.ListOfSpans', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='spans', full_name='zipkin.proto3.ListOfSpans.spans', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=659, serialized_end=708, ) _REPORTRESPONSE = _descriptor.Descriptor( name='ReportResponse', full_name='zipkin.proto3.ReportResponse', filename=None, file=DESCRIPTOR, 
containing_type=None, create_key=_descriptor._internal_create_key, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=710, serialized_end=726, ) _SPAN_TAGSENTRY.containing_type = _SPAN _SPAN.fields_by_name['kind'].enum_type = _SPAN_KIND _SPAN.fields_by_name['local_endpoint'].message_type = _ENDPOINT _SPAN.fields_by_name['remote_endpoint'].message_type = _ENDPOINT _SPAN.fields_by_name['annotations'].message_type = _ANNOTATION _SPAN.fields_by_name['tags'].message_type = _SPAN_TAGSENTRY _SPAN_KIND.containing_type = _SPAN _LISTOFSPANS.fields_by_name['spans'].message_type = _SPAN DESCRIPTOR.message_types_by_name['Span'] = _SPAN DESCRIPTOR.message_types_by_name['Endpoint'] = _ENDPOINT DESCRIPTOR.message_types_by_name['Annotation'] = _ANNOTATION DESCRIPTOR.message_types_by_name['ListOfSpans'] = _LISTOFSPANS DESCRIPTOR.message_types_by_name['ReportResponse'] = _REPORTRESPONSE _sym_db.RegisterFileDescriptor(DESCRIPTOR) Span = _reflection.GeneratedProtocolMessageType('Span', (_message.Message,), { 'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), { 'DESCRIPTOR' : _SPAN_TAGSENTRY, '__module__' : 'zipkin_pb2' # @@protoc_insertion_point(class_scope:zipkin.proto3.Span.TagsEntry) }) , 'DESCRIPTOR' : _SPAN, '__module__' : 'zipkin_pb2' # @@protoc_insertion_point(class_scope:zipkin.proto3.Span) }) _sym_db.RegisterMessage(Span) _sym_db.RegisterMessage(Span.TagsEntry) Endpoint = _reflection.GeneratedProtocolMessageType('Endpoint', (_message.Message,), { 'DESCRIPTOR' : _ENDPOINT, '__module__' : 'zipkin_pb2' # @@protoc_insertion_point(class_scope:zipkin.proto3.Endpoint) }) _sym_db.RegisterMessage(Endpoint) Annotation = _reflection.GeneratedProtocolMessageType('Annotation', (_message.Message,), { 'DESCRIPTOR' : _ANNOTATION, '__module__' : 'zipkin_pb2' # @@protoc_insertion_point(class_scope:zipkin.proto3.Annotation) }) _sym_db.RegisterMessage(Annotation) ListOfSpans = _reflection.GeneratedProtocolMessageType('ListOfSpans', (_message.Message,), { 'DESCRIPTOR' : _LISTOFSPANS, '__module__' : 'zipkin_pb2' # @@protoc_insertion_point(class_scope:zipkin.proto3.ListOfSpans) }) _sym_db.RegisterMessage(ListOfSpans) ReportResponse = _reflection.GeneratedProtocolMessageType('ReportResponse', (_message.Message,), { 'DESCRIPTOR' : _REPORTRESPONSE, '__module__' : 'zipkin_pb2' # @@protoc_insertion_point(class_scope:zipkin.proto3.ReportResponse) }) _sym_db.RegisterMessage(ReportResponse) DESCRIPTOR._options = None _SPAN_TAGSENTRY._options = None _SPANSERVICE = _descriptor.ServiceDescriptor( name='SpanService', full_name='zipkin.proto3.SpanService', file=DESCRIPTOR, index=0, serialized_options=None, create_key=_descriptor._internal_create_key, serialized_start=728, serialized_end=812, methods=[ _descriptor.MethodDescriptor( name='Report', full_name='zipkin.proto3.SpanService.Report', index=0, containing_service=None, input_type=_LISTOFSPANS, output_type=_REPORTRESPONSE, serialized_options=None, create_key=_descriptor._internal_create_key, ), ]) _sym_db.RegisterServiceDescriptor(_SPANSERVICE) DESCRIPTOR.services_by_name['SpanService'] = _SPANSERVICE # @@protoc_insertion_point(module_scope) zipkin_pb2.pyi000066400000000000000000000212001511654350100446420ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto/http/v2/gen# @generated by generate_proto_mypy_stubs.py. Do not edit! 
import sys from google.protobuf.descriptor import ( Descriptor as google___protobuf___descriptor___Descriptor, EnumDescriptor as google___protobuf___descriptor___EnumDescriptor, FileDescriptor as google___protobuf___descriptor___FileDescriptor, ) from google.protobuf.internal.containers import ( RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer, ) from google.protobuf.message import ( Message as google___protobuf___message___Message, ) from typing import ( Iterable as typing___Iterable, List as typing___List, Mapping as typing___Mapping, MutableMapping as typing___MutableMapping, NewType as typing___NewType, Optional as typing___Optional, Text as typing___Text, Tuple as typing___Tuple, Union as typing___Union, cast as typing___cast, ) from typing_extensions import ( Literal as typing_extensions___Literal, ) builtin___bool = bool builtin___bytes = bytes builtin___float = float builtin___int = int builtin___str = str DESCRIPTOR: google___protobuf___descriptor___FileDescriptor = ... class Span(google___protobuf___message___Message): DESCRIPTOR: google___protobuf___descriptor___Descriptor = ... KindValue = typing___NewType('KindValue', builtin___int) type___KindValue = KindValue class Kind(object): DESCRIPTOR: google___protobuf___descriptor___EnumDescriptor = ... @classmethod def Name(cls, number: builtin___int) -> builtin___str: ... @classmethod def Value(cls, name: builtin___str) -> Span.KindValue: ... @classmethod def keys(cls) -> typing___List[builtin___str]: ... @classmethod def values(cls) -> typing___List[Span.KindValue]: ... @classmethod def items(cls) -> typing___List[typing___Tuple[builtin___str, Span.KindValue]]: ... SPAN_KIND_UNSPECIFIED = typing___cast(Span.KindValue, 0) CLIENT = typing___cast(Span.KindValue, 1) SERVER = typing___cast(Span.KindValue, 2) PRODUCER = typing___cast(Span.KindValue, 3) CONSUMER = typing___cast(Span.KindValue, 4) SPAN_KIND_UNSPECIFIED = typing___cast(Span.KindValue, 0) CLIENT = typing___cast(Span.KindValue, 1) SERVER = typing___cast(Span.KindValue, 2) PRODUCER = typing___cast(Span.KindValue, 3) CONSUMER = typing___cast(Span.KindValue, 4) type___Kind = Kind class TagsEntry(google___protobuf___message___Message): DESCRIPTOR: google___protobuf___descriptor___Descriptor = ... key: typing___Text = ... value: typing___Text = ... def __init__(self, *, key : typing___Optional[typing___Text] = None, value : typing___Optional[typing___Text] = None, ) -> None: ... if sys.version_info >= (3,): @classmethod def FromString(cls, s: builtin___bytes) -> Span.TagsEntry: ... else: @classmethod def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Span.TagsEntry: ... def ClearField(self, field_name: typing_extensions___Literal[u"key",b"key",u"value",b"value"]) -> None: ... type___TagsEntry = TagsEntry trace_id: builtin___bytes = ... parent_id: builtin___bytes = ... id: builtin___bytes = ... kind: type___Span.KindValue = ... name: typing___Text = ... timestamp: builtin___int = ... duration: builtin___int = ... debug: builtin___bool = ... shared: builtin___bool = ... @property def local_endpoint(self) -> type___Endpoint: ... @property def remote_endpoint(self) -> type___Endpoint: ... @property def annotations(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[type___Annotation]: ... @property def tags(self) -> typing___MutableMapping[typing___Text, typing___Text]: ... 
def __init__(self, *, trace_id : typing___Optional[builtin___bytes] = None, parent_id : typing___Optional[builtin___bytes] = None, id : typing___Optional[builtin___bytes] = None, kind : typing___Optional[type___Span.KindValue] = None, name : typing___Optional[typing___Text] = None, timestamp : typing___Optional[builtin___int] = None, duration : typing___Optional[builtin___int] = None, local_endpoint : typing___Optional[type___Endpoint] = None, remote_endpoint : typing___Optional[type___Endpoint] = None, annotations : typing___Optional[typing___Iterable[type___Annotation]] = None, tags : typing___Optional[typing___Mapping[typing___Text, typing___Text]] = None, debug : typing___Optional[builtin___bool] = None, shared : typing___Optional[builtin___bool] = None, ) -> None: ... if sys.version_info >= (3,): @classmethod def FromString(cls, s: builtin___bytes) -> Span: ... else: @classmethod def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Span: ... def HasField(self, field_name: typing_extensions___Literal[u"local_endpoint",b"local_endpoint",u"remote_endpoint",b"remote_endpoint"]) -> builtin___bool: ... def ClearField(self, field_name: typing_extensions___Literal[u"annotations",b"annotations",u"debug",b"debug",u"duration",b"duration",u"id",b"id",u"kind",b"kind",u"local_endpoint",b"local_endpoint",u"name",b"name",u"parent_id",b"parent_id",u"remote_endpoint",b"remote_endpoint",u"shared",b"shared",u"tags",b"tags",u"timestamp",b"timestamp",u"trace_id",b"trace_id"]) -> None: ... type___Span = Span class Endpoint(google___protobuf___message___Message): DESCRIPTOR: google___protobuf___descriptor___Descriptor = ... service_name: typing___Text = ... ipv4: builtin___bytes = ... ipv6: builtin___bytes = ... port: builtin___int = ... def __init__(self, *, service_name : typing___Optional[typing___Text] = None, ipv4 : typing___Optional[builtin___bytes] = None, ipv6 : typing___Optional[builtin___bytes] = None, port : typing___Optional[builtin___int] = None, ) -> None: ... if sys.version_info >= (3,): @classmethod def FromString(cls, s: builtin___bytes) -> Endpoint: ... else: @classmethod def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Endpoint: ... def ClearField(self, field_name: typing_extensions___Literal[u"ipv4",b"ipv4",u"ipv6",b"ipv6",u"port",b"port",u"service_name",b"service_name"]) -> None: ... type___Endpoint = Endpoint class Annotation(google___protobuf___message___Message): DESCRIPTOR: google___protobuf___descriptor___Descriptor = ... timestamp: builtin___int = ... value: typing___Text = ... def __init__(self, *, timestamp : typing___Optional[builtin___int] = None, value : typing___Optional[typing___Text] = None, ) -> None: ... if sys.version_info >= (3,): @classmethod def FromString(cls, s: builtin___bytes) -> Annotation: ... else: @classmethod def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Annotation: ... def ClearField(self, field_name: typing_extensions___Literal[u"timestamp",b"timestamp",u"value",b"value"]) -> None: ... type___Annotation = Annotation class ListOfSpans(google___protobuf___message___Message): DESCRIPTOR: google___protobuf___descriptor___Descriptor = ... @property def spans(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[type___Span]: ... def __init__(self, *, spans : typing___Optional[typing___Iterable[type___Span]] = None, ) -> None: ... 
if sys.version_info >= (3,): @classmethod def FromString(cls, s: builtin___bytes) -> ListOfSpans: ... else: @classmethod def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> ListOfSpans: ... def ClearField(self, field_name: typing_extensions___Literal[u"spans",b"spans"]) -> None: ... type___ListOfSpans = ListOfSpans class ReportResponse(google___protobuf___message___Message): DESCRIPTOR: google___protobuf___descriptor___Descriptor = ... def __init__(self, ) -> None: ... if sys.version_info >= (3,): @classmethod def FromString(cls, s: builtin___bytes) -> ReportResponse: ... else: @classmethod def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> ReportResponse: ... type___ReportResponse = ReportResponse version/000077500000000000000000000000001511654350100424425ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto/http__init__.py000066400000000000000000000011401511654350100445470ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry/exporter/zipkin/proto/http/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "1.39.1" python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/test-requirements.txt000066400000000000000000000007701511654350100343200ustar00rootroot00000000000000asgiref==3.7.2 certifi==2024.7.4 charset-normalizer==3.3.2 idna==3.7 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 protobuf==3.20.3 py-cpuinfo==9.0.0 pytest==7.4.4 requests==2.32.3 tomli==2.0.1 typing_extensions==4.10.0 urllib3==2.2.2 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e exporter/opentelemetry-exporter-zipkin-json -e opentelemetry-sdk -e tests/opentelemetry-test-utils -e opentelemetry-semantic-conventions -e exporter/opentelemetry-exporter-zipkin-proto-http python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/tests/000077500000000000000000000000001511654350100312155ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/tests/__init__.py000066400000000000000000000011101511654350100333170ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
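A short sketch exercising the generated ``zipkin_pb2`` module above directly: it builds a ``ListOfSpans``, serializes it the way ``ProtobufEncoder.serialize`` does, and parses it back. Field values are illustrative; timestamps and durations are in microseconds, and the IDs are the big-endian byte strings shown earlier.

.. code:: python

    from opentelemetry.exporter.zipkin.proto.http.v2.gen import zipkin_pb2

    span = zipkin_pb2.Span(
        trace_id=(0xDEADBEEF).to_bytes(16, "big"),
        id=(0xDEADBEF0).to_bytes(8, "big"),
        name="test-span",
        timestamp=683647322 * 10**6,  # microseconds since epoch
        duration=50 * 10**3,          # 50 ms in microseconds
        local_endpoint=zipkin_pb2.Endpoint(service_name="test_service"),
        kind=zipkin_pb2.Span.Kind.SERVER,
    )
    payload = zipkin_pb2.ListOfSpans(spans=[span]).SerializeToString()

    # This byte string is what the exporter POSTs with
    # Content-Type: application/x-protobuf; parsing it back round-trips.
    decoded = zipkin_pb2.ListOfSpans.FromString(payload)
    assert decoded.spans[0].name == "test-span"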
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/tests/encoder/000077500000000000000000000000001511654350100326345ustar00rootroot00000000000000__init__.py000066400000000000000000000000001511654350100346540ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/tests/encodercommon_tests.py000066400000000000000000000444221511654350100356470ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/tests/encoder# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import unittest from typing import Dict, List from opentelemetry import trace as trace_api from opentelemetry.exporter.zipkin.encoder import ( DEFAULT_MAX_TAG_VALUE_LENGTH, Encoder, ) from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint from opentelemetry.sdk import trace from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.trace import TraceFlags from opentelemetry.trace.status import Status, StatusCode TEST_SERVICE_NAME = "test_service" # pylint: disable=protected-access class CommonEncoderTestCases: class CommonEncoderTest(unittest.TestCase): @staticmethod @abc.abstractmethod def get_encoder(*args, **kwargs) -> Encoder: pass @classmethod def get_encoder_default(cls) -> Encoder: return cls.get_encoder() @abc.abstractmethod def test_encode_trace_id(self): pass @abc.abstractmethod def test_encode_span_id(self): pass @abc.abstractmethod def test_encode_local_endpoint_default(self): pass @abc.abstractmethod def test_encode_local_endpoint_explicits(self): pass @abc.abstractmethod def _test_encode_max_tag_length(self, max_tag_value_length: int): pass def test_encode_max_tag_length_2(self): self._test_encode_max_tag_length(2) def test_encode_max_tag_length_5(self): self._test_encode_max_tag_length(5) def test_encode_max_tag_length_9(self): self._test_encode_max_tag_length(9) def test_encode_max_tag_length_10(self): self._test_encode_max_tag_length(10) def test_encode_max_tag_length_11(self): self._test_encode_max_tag_length(11) def test_encode_max_tag_length_128(self): self._test_encode_max_tag_length(128) def test_constructor_default(self): encoder = self.get_encoder() self.assertEqual( DEFAULT_MAX_TAG_VALUE_LENGTH, encoder.max_tag_value_length ) def test_constructor_max_tag_value_length(self): max_tag_value_length = 123456 encoder = self.get_encoder(max_tag_value_length) self.assertEqual( max_tag_value_length, encoder.max_tag_value_length ) def test_nsec_to_usec_round(self): base_time_nsec = 683647322 * 10**9 for nsec in ( base_time_nsec, base_time_nsec + 150 * 10**6, base_time_nsec + 300 * 10**6, base_time_nsec + 400 * 10**6, ): self.assertEqual( (nsec + 500) // 10**3, self.get_encoder_default()._nsec_to_usec_round(nsec), ) def test_encode_debug(self): self.assertFalse( self.get_encoder_default()._encode_debug( trace_api.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=0x00000000DEADBEF0, is_remote=False, 
trace_flags=TraceFlags(TraceFlags.DEFAULT), ) ) ) self.assertTrue( self.get_encoder_default()._encode_debug( trace_api.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=0x00000000DEADBEF0, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), ) ) ) def test_get_parent_id_from_span(self): parent_id = 0x00000000DEADBEF0 self.assertEqual( parent_id, self.get_encoder_default()._get_parent_id( trace._Span( name="test-span", context=trace_api.SpanContext( 0x000000000000000000000000DEADBEEF, 0x04BF92DEEFC58C92, is_remote=False, ), parent=trace_api.SpanContext( 0x0000000000000000000000AADEADBEEF, parent_id, is_remote=False, ), ) ), ) def test_get_parent_id_from_span_context(self): parent_id = 0x00000000DEADBEF0 self.assertEqual( parent_id, self.get_encoder_default()._get_parent_id( trace_api.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=parent_id, is_remote=False, ), ), ) @staticmethod def get_data_for_max_tag_length_test( max_tag_length: int, ) -> (trace._Span, Dict): start_time = 683647322 * 10**9 # in ns duration = 50 * 10**6 end_time = start_time + duration span = trace._Span( name=TEST_SERVICE_NAME, context=trace_api.SpanContext( 0x0E0C63257DE34C926F9EFCD03927272E, 0x04BF92DEEFC58C92, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), ), resource=trace.Resource({}), ) span.start(start_time=start_time) span.set_attribute("string1", "v" * 500) span.set_attribute("string2", "v" * 50) span.set_attribute("list1", ["a"] * 25) span.set_attribute("list2", ["a"] * 10) span.set_attribute("list3", [2] * 25) span.set_attribute("list4", [2] * 10) span.set_attribute("list5", [True] * 25) span.set_attribute("list6", [True] * 10) span.set_attribute("tuple1", ("a",) * 25) span.set_attribute("tuple2", ("a",) * 10) span.set_attribute("tuple3", (2,) * 25) span.set_attribute("tuple4", (2,) * 10) span.set_attribute("tuple5", (True,) * 25) span.set_attribute("tuple6", (True,) * 10) span.set_attribute("range1", range(0, 25)) span.set_attribute("range2", range(0, 10)) span.set_attribute("empty_list", []) span.set_attribute("none_list", ["hello", None, "world"]) span.end(end_time=end_time) expected_outputs = { 2: { "string1": "vv", "string2": "vv", "list1": "[]", "list2": "[]", "list3": "[]", "list4": "[]", "list5": "[]", "list6": "[]", "tuple1": "[]", "tuple2": "[]", "tuple3": "[]", "tuple4": "[]", "tuple5": "[]", "tuple6": "[]", "range1": "[]", "range2": "[]", "empty_list": "[]", "none_list": "[]", }, 5: { "string1": "vvvvv", "string2": "vvvvv", "list1": '["a"]', "list2": '["a"]', "list3": '["2"]', "list4": '["2"]', "list5": "[]", "list6": "[]", "tuple1": '["a"]', "tuple2": '["a"]', "tuple3": '["2"]', "tuple4": '["2"]', "tuple5": "[]", "tuple6": "[]", "range1": '["0"]', "range2": '["0"]', "empty_list": "[]", "none_list": "[]", }, 9: { "string1": "vvvvvvvvv", "string2": "vvvvvvvvv", "list1": '["a","a"]', "list2": '["a","a"]', "list3": '["2","2"]', "list4": '["2","2"]', "list5": '["true"]', "list6": '["true"]', "tuple1": '["a","a"]', "tuple2": '["a","a"]', "tuple3": '["2","2"]', "tuple4": '["2","2"]', "tuple5": '["true"]', "tuple6": '["true"]', "range1": '["0","1"]', "range2": '["0","1"]', "empty_list": "[]", "none_list": '["hello"]', }, 10: { "string1": "vvvvvvvvvv", "string2": "vvvvvvvvvv", "list1": '["a","a"]', "list2": '["a","a"]', "list3": '["2","2"]', "list4": '["2","2"]', "list5": '["true"]', "list6": '["true"]', "tuple1": '["a","a"]', "tuple2": '["a","a"]', "tuple3": '["2","2"]', "tuple4": '["2","2"]', "tuple5": '["true"]', "tuple6": '["true"]', "range1": 
'["0","1"]', "range2": '["0","1"]', "empty_list": "[]", "none_list": '["hello"]', }, 11: { "string1": "vvvvvvvvvvv", "string2": "vvvvvvvvvvv", "list1": '["a","a"]', "list2": '["a","a"]', "list3": '["2","2"]', "list4": '["2","2"]', "list5": '["true"]', "list6": '["true"]', "tuple1": '["a","a"]', "tuple2": '["a","a"]', "tuple3": '["2","2"]', "tuple4": '["2","2"]', "tuple5": '["true"]', "tuple6": '["true"]', "range1": '["0","1"]', "range2": '["0","1"]', "empty_list": "[]", "none_list": '["hello"]', }, 128: { "string1": "v" * 128, "string2": "v" * 50, "list1": '["a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a"]', "list2": '["a","a","a","a","a","a","a","a","a","a"]', "list3": '["2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2"]', "list4": '["2","2","2","2","2","2","2","2","2","2"]', "list5": '["true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"]', "list6": '["true","true","true","true","true","true","true","true","true","true"]', "tuple1": '["a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a"]', "tuple2": '["a","a","a","a","a","a","a","a","a","a"]', "tuple3": '["2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2","2"]', "tuple4": '["2","2","2","2","2","2","2","2","2","2"]', "tuple5": '["true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true","true"]', "tuple6": '["true","true","true","true","true","true","true","true","true","true"]', "range1": '["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24"]', "range2": '["0","1","2","3","4","5","6","7","8","9"]', "empty_list": "[]", "none_list": '["hello",null,"world"]', }, } return span, expected_outputs[max_tag_length] @staticmethod def get_exhaustive_otel_span_list() -> List[trace._Span]: trace_id = 0x6E0C63257DE34C926F9EFCD03927272E base_time = 683647322 * 10**9 # in ns start_times = ( base_time, base_time + 150 * 10**6, base_time + 300 * 10**6, base_time + 400 * 10**6, ) end_times = ( start_times[0] + (50 * 10**6), start_times[1] + (100 * 10**6), start_times[2] + (200 * 10**6), start_times[3] + (300 * 10**6), ) parent_span_context = trace_api.SpanContext( trace_id, 0x1111111111111111, is_remote=False ) other_context = trace_api.SpanContext( trace_id, 0x2222222222222222, is_remote=False ) span1 = trace._Span( name="test-span-1", context=trace_api.SpanContext( trace_id, 0x34BF92DEEFC58C92, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), ), parent=parent_span_context, events=( trace.Event( name="event0", timestamp=base_time + 50 * 10**6, attributes={ "annotation_bool": True, "annotation_string": "annotation_test", "key_float": 0.3, }, ), ), links=( trace_api.Link( context=other_context, attributes={"key_bool": True} ), ), resource=trace.Resource({}), ) span1.start(start_time=start_times[0]) span1.set_attribute("key_bool", False) span1.set_attribute("key_string", "hello_world") span1.set_attribute("key_float", 111.22) span1.set_status(Status(StatusCode.OK)) span1.end(end_time=end_times[0]) span2 = trace._Span( name="test-span-2", context=parent_span_context, parent=None, resource=trace.Resource( attributes={"key_resource": "some_resource"} ), ) span2.start(start_time=start_times[1]) span2.set_status(Status(StatusCode.ERROR, "Example description")) 
span2.end(end_time=end_times[1]) span3 = trace._Span( name="test-span-3", context=other_context, parent=None, resource=trace.Resource( attributes={"key_resource": "some_resource"} ), ) span3.start(start_time=start_times[2]) span3.set_attribute("key_string", "hello_world") span3.end(end_time=end_times[2]) span4 = trace._Span( name="test-span-3", context=other_context, parent=None, resource=trace.Resource({}), instrumentation_scope=InstrumentationScope( name="name", version="version" ), ) span4.start(start_time=start_times[3]) span4.end(end_time=end_times[3]) return [span1, span2, span3, span4] # pylint: disable=W0223 class CommonJsonEncoderTest(CommonEncoderTest, abc.ABC): def test_encode_trace_id(self): for trace_id in (1, 1024, 2**32, 2**64, 2**65): self.assertEqual( format(trace_id, "032x"), self.get_encoder_default()._encode_trace_id(trace_id), ) def test_encode_span_id(self): for span_id in (1, 1024, 2**8, 2**16, 2**32, 2**64): self.assertEqual( format(span_id, "016x"), self.get_encoder_default()._encode_span_id(span_id), ) def test_encode_local_endpoint_default(self): self.assertEqual( self.get_encoder_default()._encode_local_endpoint( NodeEndpoint() ), {"serviceName": TEST_SERVICE_NAME}, ) def test_encode_local_endpoint_explicits(self): ipv4 = "192.168.0.1" ipv6 = "2001:db8::c001" port = 414120 self.assertEqual( self.get_encoder_default()._encode_local_endpoint( NodeEndpoint(ipv4, ipv6, port) ), { "serviceName": TEST_SERVICE_NAME, "ipv4": ipv4, "ipv6": ipv6, "port": port, }, ) @staticmethod def pop_and_sort(source_list, source_index, sort_key): """ Convenience method that will pop a specified index from a list, sort it by a given key and then return it. """ popped_item = source_list.pop(source_index, None) if popped_item is not None: popped_item = sorted(popped_item, key=lambda x: x[sort_key]) return popped_item def assert_equal_encoded_spans(self, expected_spans, actual_spans): self.assertEqual(expected_spans, actual_spans) test_v2_protobuf.py000066400000000000000000000237111511654350100364410ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/tests/encoder# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
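# These tests exercise the Zipkin v2 protobuf encoder: trace/span IDs are
# encoded as big-endian bytes (16 and 8 bytes respectively), timestamps and
# durations are rounded from nanoseconds to microseconds, and attributes,
# status and instrumentation scope are flattened into Zipkin tags.
# A minimal usage sketch (assuming a collector listening on the default
# endpoint, http://localhost:9411/api/v2/spans):
#
#     from opentelemetry.exporter.zipkin.proto.http import ZipkinExporter
#
#     exporter = ZipkinExporter()  # serializes via ProtobufEncoder
#     exporter.export(spans)       # POSTs the encoded ListOfSpans payload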
import ipaddress import json from opentelemetry.exporter.zipkin.encoder import ( _SCOPE_NAME_KEY, _SCOPE_VERSION_KEY, NAME_KEY, VERSION_KEY, ) from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint from opentelemetry.exporter.zipkin.proto.http.v2 import ProtobufEncoder from opentelemetry.exporter.zipkin.proto.http.v2.gen import zipkin_pb2 from opentelemetry.test.spantestutil import ( get_span_with_dropped_attributes_events_links, ) from opentelemetry.trace import SpanKind from .common_tests import ( # pylint: disable=import-error TEST_SERVICE_NAME, CommonEncoderTestCases, ) # pylint: disable=protected-access class TestProtobufEncoder(CommonEncoderTestCases.CommonEncoderTest): @staticmethod def get_encoder(*args, **kwargs) -> ProtobufEncoder: return ProtobufEncoder(*args, **kwargs) def test_encode_trace_id(self): for trace_id in (1, 1024, 2**32, 2**64, 2**127): self.assertEqual( self.get_encoder_default()._encode_trace_id(trace_id), trace_id.to_bytes(length=16, byteorder="big", signed=False), ) def test_encode_span_id(self): for span_id in (1, 1024, 2**8, 2**16, 2**32, 2**63): self.assertEqual( self.get_encoder_default()._encode_span_id(span_id), span_id.to_bytes(length=8, byteorder="big", signed=False), ) def test_encode_local_endpoint_default(self): self.assertEqual( ProtobufEncoder()._encode_local_endpoint(NodeEndpoint()), zipkin_pb2.Endpoint(service_name=TEST_SERVICE_NAME), ) def test_encode_local_endpoint_explicits(self): ipv4 = "192.168.0.1" ipv6 = "2001:db8::c001" port = 414120 self.assertEqual( ProtobufEncoder()._encode_local_endpoint( NodeEndpoint(ipv4, ipv6, port) ), zipkin_pb2.Endpoint( service_name=TEST_SERVICE_NAME, ipv4=ipaddress.ip_address(ipv4).packed, ipv6=ipaddress.ip_address(ipv6).packed, port=port, ), ) def test_encode(self): local_endpoint = zipkin_pb2.Endpoint(service_name=TEST_SERVICE_NAME) span_kind = ProtobufEncoder.SPAN_KIND_MAP[SpanKind.INTERNAL] otel_spans = self.get_exhaustive_otel_span_list() trace_id = ProtobufEncoder._encode_trace_id( otel_spans[0].context.trace_id ) expected_output = zipkin_pb2.ListOfSpans( spans=[ zipkin_pb2.Span( trace_id=trace_id, id=ProtobufEncoder._encode_span_id( otel_spans[0].context.span_id ), name=otel_spans[0].name, timestamp=ProtobufEncoder._nsec_to_usec_round( otel_spans[0].start_time ), duration=( ProtobufEncoder._nsec_to_usec_round( otel_spans[0].end_time - otel_spans[0].start_time ) ), local_endpoint=local_endpoint, kind=span_kind, tags={ "key_bool": "false", "key_string": "hello_world", "key_float": "111.22", "otel.status_code": "OK", }, debug=True, parent_id=ProtobufEncoder._encode_span_id( otel_spans[0].parent.span_id ), annotations=[ zipkin_pb2.Annotation( timestamp=ProtobufEncoder._nsec_to_usec_round( otel_spans[0].events[0].timestamp ), value=json.dumps( { "event0": { "annotation_bool": True, "annotation_string": "annotation_test", "key_float": 0.3, } }, sort_keys=True, ), ), ], ), zipkin_pb2.Span( trace_id=trace_id, id=ProtobufEncoder._encode_span_id( otel_spans[1].context.span_id ), name=otel_spans[1].name, timestamp=ProtobufEncoder._nsec_to_usec_round( otel_spans[1].start_time ), duration=( ProtobufEncoder._nsec_to_usec_round( otel_spans[1].end_time - otel_spans[1].start_time ) ), local_endpoint=local_endpoint, kind=span_kind, tags={ "key_resource": "some_resource", "otel.status_code": "ERROR", "error": "Example description", }, debug=False, ), zipkin_pb2.Span( trace_id=trace_id, id=ProtobufEncoder._encode_span_id( otel_spans[2].context.span_id ), name=otel_spans[2].name, 
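# span3's expected tags merge its own attribute ("key_string") with the
# resource attribute ("key_resource").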
timestamp=ProtobufEncoder._nsec_to_usec_round( otel_spans[2].start_time ), duration=( ProtobufEncoder._nsec_to_usec_round( otel_spans[2].end_time - otel_spans[2].start_time ) ), local_endpoint=local_endpoint, kind=span_kind, tags={ "key_string": "hello_world", "key_resource": "some_resource", }, debug=False, ), zipkin_pb2.Span( trace_id=trace_id, id=ProtobufEncoder._encode_span_id( otel_spans[3].context.span_id ), name=otel_spans[3].name, timestamp=ProtobufEncoder._nsec_to_usec_round( otel_spans[3].start_time ), duration=( ProtobufEncoder._nsec_to_usec_round( otel_spans[3].end_time - otel_spans[3].start_time ) ), local_endpoint=local_endpoint, kind=span_kind, tags={ NAME_KEY: "name", VERSION_KEY: "version", _SCOPE_NAME_KEY: "name", _SCOPE_VERSION_KEY: "version", }, debug=False, ), ], ) actual_output = zipkin_pb2.ListOfSpans.FromString( ProtobufEncoder().serialize(otel_spans, NodeEndpoint()) ) self.assertEqual(actual_output, expected_output) def _test_encode_max_tag_length(self, max_tag_value_length: int): otel_span, expected_tag_output = self.get_data_for_max_tag_length_test( max_tag_value_length ) service_name = otel_span.name expected_output = zipkin_pb2.ListOfSpans( spans=[ zipkin_pb2.Span( trace_id=ProtobufEncoder._encode_trace_id( otel_span.context.trace_id ), id=ProtobufEncoder._encode_span_id( otel_span.context.span_id ), name=service_name, timestamp=ProtobufEncoder._nsec_to_usec_round( otel_span.start_time ), duration=ProtobufEncoder._nsec_to_usec_round( otel_span.end_time - otel_span.start_time ), local_endpoint=zipkin_pb2.Endpoint( service_name=service_name ), kind=ProtobufEncoder.SPAN_KIND_MAP[SpanKind.INTERNAL], tags=expected_tag_output, annotations=None, debug=True, ) ] ) actual_output = zipkin_pb2.ListOfSpans.FromString( ProtobufEncoder(max_tag_value_length).serialize( [otel_span], NodeEndpoint() ) ) self.assertEqual(actual_output, expected_output) def test_dropped_span_attributes(self): otel_span = get_span_with_dropped_attributes_events_links() # pylint: disable=no-member tags = ( ProtobufEncoder() ._encode_span(otel_span, zipkin_pb2.Endpoint()) .tags ) self.assertEqual("1", tags["otel.dropped_links_count"]) self.assertEqual("2", tags["otel.dropped_attributes_count"]) self.assertEqual("3", tags["otel.dropped_events_count"]) test_zipkin_exporter.py000066400000000000000000000211421511654350100360030ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin-proto-http/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
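# These tests cover the HTTP ZipkinExporter: constructor defaults, the
# OTEL_EXPORTER_ZIPKIN_ENDPOINT / OTEL_EXPORTER_ZIPKIN_TIMEOUT environment
# variables, the precedence of explicit constructor arguments over those
# variables, and export success/failure/shutdown behaviour.
# A minimal configuration sketch (values are illustrative):
#
#     export OTEL_EXPORTER_ZIPKIN_ENDPOINT="https://collector:9411/api/v2/spans"
#     export OTEL_EXPORTER_ZIPKIN_TIMEOUT="15"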
import ipaddress import os import unittest from unittest.mock import patch import requests from opentelemetry import trace from opentelemetry.exporter.zipkin.node_endpoint import NodeEndpoint from opentelemetry.exporter.zipkin.proto.http import ( DEFAULT_ENDPOINT, ZipkinExporter, ) from opentelemetry.exporter.zipkin.proto.http.v2 import ProtobufEncoder from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_ZIPKIN_ENDPOINT, OTEL_EXPORTER_ZIPKIN_TIMEOUT, ) from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.sdk.trace import TracerProvider, _Span from opentelemetry.sdk.trace.export import SpanExportResult TEST_SERVICE_NAME = "test_service" class MockResponse: def __init__(self, status_code): self.status_code = status_code self.text = status_code class TestZipkinExporter(unittest.TestCase): @classmethod def setUpClass(cls): trace.set_tracer_provider( TracerProvider( resource=Resource({SERVICE_NAME: TEST_SERVICE_NAME}) ) ) def tearDown(self): os.environ.pop(OTEL_EXPORTER_ZIPKIN_ENDPOINT, None) os.environ.pop(OTEL_EXPORTER_ZIPKIN_TIMEOUT, None) def test_constructor_default(self): exporter = ZipkinExporter() self.assertIsInstance(exporter.encoder, ProtobufEncoder) self.assertIsInstance(exporter.session, requests.Session) self.assertEqual(exporter.endpoint, DEFAULT_ENDPOINT) self.assertEqual(exporter.local_node.service_name, TEST_SERVICE_NAME) self.assertEqual(exporter.local_node.ipv4, None) self.assertEqual(exporter.local_node.ipv6, None) self.assertEqual(exporter.local_node.port, None) def test_constructor_env_vars(self): os_endpoint = "https://foo:9911/path" os.environ[OTEL_EXPORTER_ZIPKIN_ENDPOINT] = os_endpoint os.environ[OTEL_EXPORTER_ZIPKIN_TIMEOUT] = "15" exporter = ZipkinExporter() self.assertEqual(exporter.endpoint, os_endpoint) self.assertEqual(exporter.local_node.service_name, TEST_SERVICE_NAME) self.assertEqual(exporter.local_node.ipv4, None) self.assertEqual(exporter.local_node.ipv6, None) self.assertEqual(exporter.local_node.port, None) self.assertEqual(exporter.timeout, 15) def test_constructor_protocol_endpoint(self): """Test the constructor for the common usage of providing the protocol and endpoint arguments.""" endpoint = "https://opentelemetry.io:15875/myapi/traces?format=zipkin" exporter = ZipkinExporter(endpoint) self.assertIsInstance(exporter.encoder, ProtobufEncoder) self.assertIsInstance(exporter.session, requests.Session) self.assertEqual(exporter.endpoint, endpoint) self.assertEqual(exporter.local_node.service_name, TEST_SERVICE_NAME) self.assertEqual(exporter.local_node.ipv4, None) self.assertEqual(exporter.local_node.ipv6, None) self.assertEqual(exporter.local_node.port, None) def test_constructor_all_params_and_env_vars(self): """Test the scenario where all params are provided and all OS env vars are set. Explicit params should take precedence. 
""" os_endpoint = "https://os.env.param:9911/path" os.environ[OTEL_EXPORTER_ZIPKIN_ENDPOINT] = os_endpoint os.environ[OTEL_EXPORTER_ZIPKIN_TIMEOUT] = "15" constructor_param_endpoint = "https://constructor.param:9911/path" local_node_ipv4 = "192.168.0.1" local_node_ipv6 = "2001:db8::1000" local_node_port = 30301 max_tag_value_length = 56 timeout_param = 20 session_param = requests.Session() exporter = ZipkinExporter( constructor_param_endpoint, local_node_ipv4, local_node_ipv6, local_node_port, max_tag_value_length, timeout_param, session_param, ) self.assertIsInstance(exporter.encoder, ProtobufEncoder) self.assertIsInstance(exporter.session, requests.Session) self.assertEqual(exporter.endpoint, constructor_param_endpoint) self.assertEqual(exporter.local_node.service_name, TEST_SERVICE_NAME) self.assertEqual( exporter.local_node.ipv4, ipaddress.IPv4Address(local_node_ipv4) ) self.assertEqual( exporter.local_node.ipv6, ipaddress.IPv6Address(local_node_ipv6) ) self.assertEqual(exporter.local_node.port, local_node_port) # Assert timeout passed in constructor is prioritized over env # when both are set. self.assertEqual(exporter.timeout, 20) @patch("requests.Session.post") def test_export_success(self, mock_post): mock_post.return_value = MockResponse(200) spans = [] exporter = ZipkinExporter() status = exporter.export(spans) self.assertEqual(SpanExportResult.SUCCESS, status) @patch("requests.Session.post") def test_export_invalid_response(self, mock_post): mock_post.return_value = MockResponse(404) spans = [] exporter = ZipkinExporter() status = exporter.export(spans) self.assertEqual(SpanExportResult.FAILURE, status) @patch("requests.Session.post") def test_export_span_service_name(self, mock_post): mock_post.return_value = MockResponse(200) resource = Resource.create({SERVICE_NAME: "test"}) context = trace.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=0x00000000DEADBEF0, is_remote=False, ) span = _Span("test_span", context=context, resource=resource) span.start() span.end() exporter = ZipkinExporter() exporter.export([span]) self.assertEqual(exporter.local_node.service_name, "test") @patch("requests.Session.post") def test_export_shutdown(self, mock_post): mock_post.return_value = MockResponse(200) spans = [] exporter = ZipkinExporter() status = exporter.export(spans) self.assertEqual(SpanExportResult.SUCCESS, status) exporter.shutdown() # Any call to .export() post shutdown should return failure status = exporter.export(spans) self.assertEqual(SpanExportResult.FAILURE, status) @patch("requests.Session.post") def test_export_timeout(self, mock_post): mock_post.return_value = MockResponse(200) spans = [] exporter = ZipkinExporter(timeout=2) status = exporter.export(spans) self.assertEqual(SpanExportResult.SUCCESS, status) mock_post.assert_called_with( url="http://localhost:9411/api/v2/spans", data=b"", timeout=2 ) class TestZipkinNodeEndpoint(unittest.TestCase): def test_constructor_default(self): node_endpoint = NodeEndpoint() self.assertEqual(node_endpoint.ipv4, None) self.assertEqual(node_endpoint.ipv6, None) self.assertEqual(node_endpoint.port, None) self.assertEqual(node_endpoint.service_name, TEST_SERVICE_NAME) def test_constructor_explicits(self): ipv4 = "192.168.0.1" ipv6 = "2001:db8::c001" port = 414120 node_endpoint = NodeEndpoint(ipv4, ipv6, port) self.assertEqual(node_endpoint.ipv4, ipaddress.IPv4Address(ipv4)) self.assertEqual(node_endpoint.ipv6, ipaddress.IPv6Address(ipv6)) self.assertEqual(node_endpoint.port, port) 
self.assertEqual(node_endpoint.service_name, TEST_SERVICE_NAME) def test_ipv4_invalid_raises_error(self): with self.assertRaises(ValueError): NodeEndpoint(ipv4="invalid-ipv4-address") def test_ipv4_passed_ipv6_raises_error(self): with self.assertRaises(ValueError): NodeEndpoint(ipv4="2001:db8::c001") def test_ipv6_invalid_raises_error(self): with self.assertRaises(ValueError): NodeEndpoint(ipv6="invalid-ipv6-address") def test_ipv6_passed_ipv4_raises_error(self): with self.assertRaises(ValueError): NodeEndpoint(ipv6="192.168.0.1") python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/000077500000000000000000000000001511654350100257355ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/LICENSE000066400000000000000000000261351511654350100267510ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/README.rst000066400000000000000000000017011511654350100274230ustar00rootroot00000000000000OpenTelemetry Zipkin Exporter ============================= |pypi| .. |pypi| image:: https://badge.fury.io/py/opentelemetry-exporter-zipkin.svg :target: https://pypi.org/project/opentelemetry-exporter-zipkin/ This library is provided as a convenience to install all supported OpenTelemetry Zipkin Exporters. Currently it installs: * opentelemetry-exporter-zipkin-json * opentelemetry-exporter-zipkin-proto-http In the future, additional packages may be available: * opentelemetry-exporter-zipkin-thrift To avoid unnecessary dependencies, users should install the specific package once they've determined their preferred serialization method. Installation ------------ :: pip install opentelemetry-exporter-zipkin References ---------- * `OpenTelemetry Zipkin Exporter `_ * `Zipkin `_ * `OpenTelemetry Project `_ python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/pyproject.toml000066400000000000000000000030171511654350100306520ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-exporter-zipkin" dynamic = ["version"] description = "Zipkin Span Exporters for OpenTelemetry" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Framework :: OpenTelemetry :: Exporters", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "opentelemetry-exporter-zipkin-json == 1.39.1", "opentelemetry-exporter-zipkin-proto-http == 1.39.1", ] [project.entry-points.opentelemetry_traces_exporter] zipkin = "opentelemetry.exporter.zipkin.proto.http:ZipkinExporter" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-zipkin" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/exporter/zipkin/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] 
python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/src/000077500000000000000000000000001511654350100265245ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/000077500000000000000000000000001511654350100314205ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/000077500000000000000000000000001511654350100332705ustar00rootroot00000000000000zipkin/000077500000000000000000000000001511654350100345155ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporterpy.typed000066400000000000000000000000001511654350100362020ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkinversion/000077500000000000000000000000001511654350100362025ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin__init__.py000066400000000000000000000011401511654350100403070ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "1.39.1" python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/test-requirements.txt000066400000000000000000000006311511654350100321760ustar00rootroot00000000000000asgiref==3.7.2 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e exporter/opentelemetry-exporter-zipkin-json -e exporter/opentelemetry-exporter-zipkin-proto-http -e opentelemetry-sdk -e opentelemetry-semantic-conventions -e exporter/opentelemetry-exporter-zipkin python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/tests/000077500000000000000000000000001511654350100270775ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/tests/__init__.py000066400000000000000000000000001511654350100311760ustar00rootroot00000000000000python-opentelemetry-1.39.1/exporter/opentelemetry-exporter-zipkin/tests/test_zipkin.py000066400000000000000000000017151511654350100320200ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
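# Smoke test: the opentelemetry-exporter-zipkin convenience package pulls in
# both serialization backends, so each exporter must be importable and
# constructible:
#
#     from opentelemetry.exporter.zipkin import json
#     from opentelemetry.exporter.zipkin.proto import http
#
#     json.ZipkinExporter()  # JSON over HTTP
#     http.ZipkinExporter()  # protobuf over HTTP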
import unittest from opentelemetry.exporter.zipkin import json from opentelemetry.exporter.zipkin.proto import http class TestZipkinExporter(unittest.TestCase): def test_constructors(self): try: json.ZipkinExporter() http.ZipkinExporter() except Exception as exc: # pylint: disable=broad-exception-caught self.assertIsNone(exc) python-opentelemetry-1.39.1/gen-requirements.txt000066400000000000000000000005401511654350100220530ustar00rootroot00000000000000# Use caution when bumping this version to ensure compatibility with the currently supported protobuf version. # Pinning this to the oldest grpcio version that supports protobuf 5 helps avoid RuntimeWarning messages # from the generated protobuf code and ensures continued stability for newer grpcio versions. grpcio-tools==1.63.2 mypy-protobuf~=3.5.0 python-opentelemetry-1.39.1/opentelemetry-api/000077500000000000000000000000001511654350100214645ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/LICENSE000066400000000000000000000261351511654350100225000ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/opentelemetry-api/README.rst000066400000000000000000000005711511654350100231560ustar00rootroot00000000000000OpenTelemetry Python API ============================================================================ |pypi| .. |pypi| image:: https://badge.fury.io/py/opentelemetry-api.svg :target: https://pypi.org/project/opentelemetry-api/ Installation ------------ :: pip install opentelemetry-api References ---------- * `OpenTelemetry Project `_ python-opentelemetry-1.39.1/opentelemetry-api/pyproject.toml000066400000000000000000000041361511654350100244040ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-api" description = "OpenTelemetry Python API" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "typing-extensions >= 4.5.0", # FIXME This should be able to be removed after 3.12 is released if there is a reliable API # in importlib.metadata. 
"importlib-metadata >= 6.0, < 8.8.0", ] dynamic = [ "version", ] [project.entry-points.opentelemetry_context] contextvars_context = "opentelemetry.context.contextvars_context:ContextVarsRuntimeContext" [project.entry-points.opentelemetry_environment_variables] api = "opentelemetry.environment_variables" [project.entry-points.opentelemetry_meter_provider] default_meter_provider = "opentelemetry.metrics:NoOpMeterProvider" [project.entry-points.opentelemetry_propagator] baggage = "opentelemetry.baggage.propagation:W3CBaggagePropagator" tracecontext = "opentelemetry.trace.propagation.tracecontext:TraceContextTextMapPropagator" [project.entry-points.opentelemetry_tracer_provider] default_tracer_provider = "opentelemetry.trace:NoOpTracerProvider" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/opentelemetry-api" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] python-opentelemetry-1.39.1/opentelemetry-api/src/000077500000000000000000000000001511654350100222535ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/000077500000000000000000000000001511654350100251475ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/_events/000077500000000000000000000000001511654350100266125ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/_events/__init__.py000066400000000000000000000202631511654350100307260ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from logging import getLogger from os import environ from typing import Optional, cast from typing_extensions import deprecated from opentelemetry._logs import LogRecord from opentelemetry._logs.severity import SeverityNumber from opentelemetry.environment_variables import ( _OTEL_PYTHON_EVENT_LOGGER_PROVIDER, ) from opentelemetry.trace.span import TraceFlags from opentelemetry.util._once import Once from opentelemetry.util._providers import _load_provider from opentelemetry.util.types import AnyValue, _ExtendedAttributes _logger = getLogger(__name__) @deprecated( "You should use `LogRecord` with the `event_name` field set instead. " "Deprecated since version 1.39.0 and will be removed in a future release." 
) class Event(LogRecord): def __init__( self, name: str, timestamp: Optional[int] = None, trace_id: Optional[int] = None, span_id: Optional[int] = None, trace_flags: Optional["TraceFlags"] = None, body: Optional[AnyValue] = None, severity_number: Optional[SeverityNumber] = None, attributes: Optional[_ExtendedAttributes] = None, ): attributes = attributes or {} event_attributes = { **attributes, "event.name": name, } super().__init__( timestamp=timestamp, trace_id=trace_id, span_id=span_id, trace_flags=trace_flags, body=body, severity_number=severity_number, attributes=event_attributes, ) self.name = name @deprecated( "You should use `Logger` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." ) class EventLogger(ABC): def __init__( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ): self._name = name self._version = version self._schema_url = schema_url self._attributes = attributes @abstractmethod def emit(self, event: "Event") -> None: """Emits a :class:`Event` representing an event.""" @deprecated( "You should use `NoOpLogger` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." ) class NoOpEventLogger(EventLogger): def emit(self, event: Event) -> None: pass @deprecated( "You should use `ProxyLogger` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." ) class ProxyEventLogger(EventLogger): def __init__( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ): super().__init__( name=name, version=version, schema_url=schema_url, attributes=attributes, ) self._real_event_logger: Optional[EventLogger] = None self._noop_event_logger = NoOpEventLogger(name) @property def _event_logger(self) -> EventLogger: if self._real_event_logger: return self._real_event_logger if _EVENT_LOGGER_PROVIDER: self._real_event_logger = _EVENT_LOGGER_PROVIDER.get_event_logger( self._name, self._version, self._schema_url, self._attributes, ) return self._real_event_logger return self._noop_event_logger def emit(self, event: Event) -> None: self._event_logger.emit(event) @deprecated( "You should use `LoggerProvider` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." ) class EventLoggerProvider(ABC): @abstractmethod def get_event_logger( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ) -> EventLogger: """Returns an EventLoggerProvider for use.""" @deprecated( "You should use `NoOpLoggerProvider` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." ) class NoOpEventLoggerProvider(EventLoggerProvider): def get_event_logger( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ) -> EventLogger: return NoOpEventLogger( name, version=version, schema_url=schema_url, attributes=attributes ) @deprecated( "You should use `ProxyLoggerProvider` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." 
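# Returns loggers from the real provider once one has been set; until then it
# hands out ProxyEventLogger instances, which fall back to no-op behaviour.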
) class ProxyEventLoggerProvider(EventLoggerProvider): def get_event_logger( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ) -> EventLogger: if _EVENT_LOGGER_PROVIDER: return _EVENT_LOGGER_PROVIDER.get_event_logger( name, version=version, schema_url=schema_url, attributes=attributes, ) return ProxyEventLogger( name, version=version, schema_url=schema_url, attributes=attributes, ) _EVENT_LOGGER_PROVIDER_SET_ONCE = Once() _EVENT_LOGGER_PROVIDER: Optional[EventLoggerProvider] = None _PROXY_EVENT_LOGGER_PROVIDER = ProxyEventLoggerProvider() @deprecated( "You should use `get_logger_provider` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." ) def get_event_logger_provider() -> EventLoggerProvider: global _EVENT_LOGGER_PROVIDER # pylint: disable=global-variable-not-assigned if _EVENT_LOGGER_PROVIDER is None: if _OTEL_PYTHON_EVENT_LOGGER_PROVIDER not in environ: return _PROXY_EVENT_LOGGER_PROVIDER event_logger_provider: EventLoggerProvider = _load_provider( # type: ignore _OTEL_PYTHON_EVENT_LOGGER_PROVIDER, "event_logger_provider" ) _set_event_logger_provider(event_logger_provider, log=False) return cast("EventLoggerProvider", _EVENT_LOGGER_PROVIDER) def _set_event_logger_provider( event_logger_provider: EventLoggerProvider, log: bool ) -> None: def set_elp() -> None: global _EVENT_LOGGER_PROVIDER # pylint: disable=global-statement _EVENT_LOGGER_PROVIDER = event_logger_provider did_set = _EVENT_LOGGER_PROVIDER_SET_ONCE.do_once(set_elp) if log and not did_set: _logger.warning( "Overriding of current EventLoggerProvider is not allowed" ) @deprecated( "You should use `set_logger_provider` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." ) def set_event_logger_provider( event_logger_provider: EventLoggerProvider, ) -> None: _set_event_logger_provider(event_logger_provider, log=True) @deprecated( "You should use `get_logger` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." ) def get_event_logger( name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, event_logger_provider: Optional[EventLoggerProvider] = None, ) -> "EventLogger": if event_logger_provider is None: event_logger_provider = get_event_logger_provider() return event_logger_provider.get_event_logger( name, version, schema_url, attributes, ) python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/_events/py.typed000066400000000000000000000000001511654350100302770ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/_logs/000077500000000000000000000000001511654350100262525ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/_logs/__init__.py000066400000000000000000000035221511654350100303650ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" The OpenTelemetry logging API describes the classes used to generate logs and events. The :class:`.LoggerProvider` provides users access to the :class:`.Logger`. This module provides abstract (i.e. unimplemented) classes required for logging, and a concrete no-op implementation :class:`.NoOpLogger` that allows applications to use the API package alone without a supporting implementation. To get a logger, you need to provide the package name from which you are calling the logging APIs to OpenTelemetry by calling `LoggerProvider.get_logger` with the calling module name and the version of your package. The following code shows how to obtain a logger using the global :class:`.LoggerProvider`:: from opentelemetry._logs import get_logger logger = get_logger("example-logger") .. versionadded:: 1.15.0 """ from opentelemetry._logs._internal import ( Logger, LoggerProvider, LogRecord, NoOpLogger, NoOpLoggerProvider, get_logger, get_logger_provider, set_logger_provider, ) from opentelemetry._logs.severity import SeverityNumber __all__ = [ "Logger", "LoggerProvider", "LogRecord", "NoOpLogger", "NoOpLoggerProvider", "get_logger", "get_logger_provider", "set_logger_provider", "SeverityNumber", ] python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/_logs/_internal/000077500000000000000000000000001511654350100302255ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/_logs/_internal/__init__.py000066400000000000000000000347321511654350100323470ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The OpenTelemetry logging API describes the classes used to generate logs and events. The :class:`.LoggerProvider` provides users access to the :class:`.Logger`. This module provides abstract (i.e. unimplemented) classes required for logging, and a concrete no-op implementation :class:`.NoOpLogger` that allows applications to use the API package alone without a supporting implementation. To get a logger, you need to provide the package name from which you are calling the logging APIs to OpenTelemetry by calling `LoggerProvider.get_logger` with the calling module name and the version of your package. The following code shows how to obtain a logger using the global :class:`.LoggerProvider`:: from opentelemetry._logs import get_logger logger = get_logger("example-logger") .. 
versionadded:: 1.15.0 """ from __future__ import annotations from abc import ABC, abstractmethod from logging import getLogger from os import environ from time import time_ns from typing import Optional, cast, overload from typing_extensions import deprecated from opentelemetry._logs.severity import SeverityNumber from opentelemetry.context import get_current from opentelemetry.context.context import Context from opentelemetry.environment_variables import _OTEL_PYTHON_LOGGER_PROVIDER from opentelemetry.trace import get_current_span from opentelemetry.trace.span import TraceFlags from opentelemetry.util._once import Once from opentelemetry.util._providers import _load_provider from opentelemetry.util.types import AnyValue, _ExtendedAttributes _logger = getLogger(__name__) class LogRecord(ABC): """A LogRecord instance represents an event being logged. LogRecord instances are created and emitted via `Logger` every time something is logged. They contain all the information pertinent to the event being logged. """ @overload def __init__( self, *, timestamp: Optional[int] = None, observed_timestamp: Optional[int] = None, context: Optional[Context] = None, severity_text: Optional[str] = None, severity_number: Optional[SeverityNumber] = None, body: AnyValue = None, attributes: Optional[_ExtendedAttributes] = None, event_name: Optional[str] = None, ) -> None: ... @overload @deprecated( "LogRecord init with `trace_id`, `span_id`, and/or `trace_flags` is deprecated since 1.35.0. Use `context` instead." ) def __init__( self, *, timestamp: Optional[int] = None, observed_timestamp: Optional[int] = None, trace_id: Optional[int] = None, span_id: Optional[int] = None, trace_flags: Optional["TraceFlags"] = None, severity_text: Optional[str] = None, severity_number: Optional[SeverityNumber] = None, body: AnyValue = None, attributes: Optional[_ExtendedAttributes] = None, ) -> None: ... 
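    # Note: of the two ``__init__`` overloads above, the ``context``-based
    # form is the supported one; passing ``trace_id``, ``span_id`` or
    # ``trace_flags`` directly is deprecated since 1.35.0. Schematically:
    #
    #     LogRecord(body="hello", context=get_current())      # preferred
    #     LogRecord(body="hello", trace_id=..., span_id=...)  # deprecated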
def __init__( self, *, timestamp: Optional[int] = None, observed_timestamp: Optional[int] = None, context: Optional[Context] = None, trace_id: Optional[int] = None, span_id: Optional[int] = None, trace_flags: Optional["TraceFlags"] = None, severity_text: Optional[str] = None, severity_number: Optional[SeverityNumber] = None, body: AnyValue = None, attributes: Optional[_ExtendedAttributes] = None, event_name: Optional[str] = None, ) -> None: if not context: context = get_current() span_context = get_current_span(context).get_span_context() self.timestamp = timestamp if observed_timestamp is None: observed_timestamp = time_ns() self.observed_timestamp = observed_timestamp self.context = context self.trace_id = trace_id or span_context.trace_id self.span_id = span_id or span_context.span_id self.trace_flags = trace_flags or span_context.trace_flags self.severity_text = severity_text self.severity_number = severity_number self.body = body self.attributes = attributes self.event_name = event_name class Logger(ABC): """Handles emitting events and logs via `LogRecord`.""" def __init__( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ) -> None: super().__init__() self._name = name self._version = version self._schema_url = schema_url self._attributes = attributes @overload def emit( self, *, timestamp: int | None = None, observed_timestamp: int | None = None, context: Context | None = None, severity_number: SeverityNumber | None = None, severity_text: str | None = None, body: AnyValue | None = None, attributes: _ExtendedAttributes | None = None, event_name: str | None = None, ) -> None: ... @overload def emit( self, record: LogRecord, ) -> None: ... @abstractmethod def emit( self, record: LogRecord | None = None, *, timestamp: int | None = None, observed_timestamp: int | None = None, context: Context | None = None, severity_number: SeverityNumber | None = None, severity_text: str | None = None, body: AnyValue | None = None, attributes: _ExtendedAttributes | None = None, event_name: str | None = None, ) -> None: """Emits a :class:`LogRecord` representing a log to the processing pipeline.""" class NoOpLogger(Logger): """The default Logger used when no Logger implementation is available. All operations are no-op. """ @overload def emit( self, *, timestamp: int | None = None, observed_timestamp: int | None = None, context: Context | None = None, severity_number: SeverityNumber | None = None, severity_text: str | None = None, body: AnyValue | None = None, attributes: _ExtendedAttributes | None = None, event_name: str | None = None, ) -> None: ... @overload def emit( # pylint:disable=arguments-differ self, record: LogRecord, ) -> None: ... 
def emit( self, record: LogRecord | None = None, *, timestamp: int | None = None, observed_timestamp: int | None = None, context: Context | None = None, severity_number: SeverityNumber | None = None, severity_text: str | None = None, body: AnyValue | None = None, attributes: _ExtendedAttributes | None = None, event_name: str | None = None, ) -> None: pass class ProxyLogger(Logger): def __init__( # pylint: disable=super-init-not-called self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ): self._name = name self._version = version self._schema_url = schema_url self._attributes = attributes self._real_logger: Optional[Logger] = None self._noop_logger = NoOpLogger(name) @property def _logger(self) -> Logger: if self._real_logger: return self._real_logger if _LOGGER_PROVIDER: self._real_logger = _LOGGER_PROVIDER.get_logger( self._name, self._version, self._schema_url, self._attributes, ) return self._real_logger return self._noop_logger @overload def emit( self, *, timestamp: int | None = None, observed_timestamp: int | None = None, context: Context | None = None, severity_number: SeverityNumber | None = None, severity_text: str | None = None, body: AnyValue | None = None, attributes: _ExtendedAttributes | None = None, event_name: str | None = None, ) -> None: ... @overload def emit( # pylint:disable=arguments-differ self, record: LogRecord, ) -> None: ... def emit( self, record: LogRecord | None = None, *, timestamp: int | None = None, observed_timestamp: int | None = None, context: Context | None = None, severity_number: SeverityNumber | None = None, severity_text: str | None = None, body: AnyValue | None = None, attributes: _ExtendedAttributes | None = None, event_name: str | None = None, ) -> None: if record: self._logger.emit(record) else: self._logger.emit( timestamp=timestamp, observed_timestamp=observed_timestamp, context=context, severity_number=severity_number, severity_text=severity_text, body=body, attributes=attributes, event_name=event_name, ) class LoggerProvider(ABC): """ LoggerProvider is the entry point of the API. It provides access to Logger instances. """ @abstractmethod def get_logger( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ) -> Logger: """Returns a `Logger` for use by the given instrumentation library. For any two calls with identical parameters, it is undefined whether the same or different `Logger` instances are returned. This function may return different `Logger` types (e.g. a no-op logger vs. a functional logger). Args: name: The name of the instrumenting module, package or class. This should *not* be the name of the module, package or class that is instrumented but the name of the code doing the instrumentation. E.g., instead of ``"requests"``, use ``"opentelemetry.instrumentation.requests"``. For log sources which define a logger name (e.g. logging.Logger.name) the Logger Name should be recorded as the instrumentation scope name. version: Optional. The version string of the instrumenting library. Usually this should be the same as ``importlib.metadata.version(instrumenting_library_name)``. schema_url: Optional. Specifies the Schema URL of the emitted telemetry. attributes: Optional. Specifies the instrumentation scope attributes to associate with emitted telemetry. 
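        A minimal usage sketch (illustrative; the scope name and version are
        placeholders)::

            from opentelemetry._logs import get_logger_provider

            logger = get_logger_provider().get_logger(
                "opentelemetry.instrumentation.example", version="0.1.0"
            )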
""" class NoOpLoggerProvider(LoggerProvider): """The default LoggerProvider used when no LoggerProvider implementation is available.""" def get_logger( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ) -> Logger: """Returns a NoOpLogger.""" return NoOpLogger( name, version=version, schema_url=schema_url, attributes=attributes ) class ProxyLoggerProvider(LoggerProvider): def get_logger( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ) -> Logger: if _LOGGER_PROVIDER: return _LOGGER_PROVIDER.get_logger( name, version=version, schema_url=schema_url, attributes=attributes, ) return ProxyLogger( name, version=version, schema_url=schema_url, attributes=attributes, ) _LOGGER_PROVIDER_SET_ONCE = Once() _LOGGER_PROVIDER: Optional[LoggerProvider] = None _PROXY_LOGGER_PROVIDER = ProxyLoggerProvider() def get_logger_provider() -> LoggerProvider: """Gets the current global :class:`~.LoggerProvider` object.""" global _LOGGER_PROVIDER # pylint: disable=global-variable-not-assigned if _LOGGER_PROVIDER is None: if _OTEL_PYTHON_LOGGER_PROVIDER not in environ: return _PROXY_LOGGER_PROVIDER logger_provider: LoggerProvider = _load_provider( # type: ignore _OTEL_PYTHON_LOGGER_PROVIDER, "logger_provider" ) _set_logger_provider(logger_provider, log=False) # _LOGGER_PROVIDER will have been set by one thread return cast("LoggerProvider", _LOGGER_PROVIDER) def _set_logger_provider(logger_provider: LoggerProvider, log: bool) -> None: def set_lp() -> None: global _LOGGER_PROVIDER # pylint: disable=global-statement _LOGGER_PROVIDER = logger_provider did_set = _LOGGER_PROVIDER_SET_ONCE.do_once(set_lp) if log and not did_set: _logger.warning("Overriding of current LoggerProvider is not allowed") def set_logger_provider(logger_provider: LoggerProvider) -> None: """Sets the current global :class:`~.LoggerProvider` object. This can only be done once, a warning will be logged if any further attempt is made. """ _set_logger_provider(logger_provider, log=True) def get_logger( instrumenting_module_name: str, instrumenting_library_version: str = "", logger_provider: Optional[LoggerProvider] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ) -> "Logger": """Returns a `Logger` for use within a python process. This function is a convenience wrapper for opentelemetry.sdk._logs.LoggerProvider.get_logger. If logger_provider param is omitted the current configured one is used. """ if logger_provider is None: logger_provider = get_logger_provider() return logger_provider.get_logger( instrumenting_module_name, instrumenting_library_version, schema_url, attributes, ) python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/_logs/py.typed000066400000000000000000000000001511654350100277370ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/_logs/severity/000077500000000000000000000000001511654350100301245ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/_logs/severity/__init__.py000066400000000000000000000027531511654350100322440ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum class SeverityNumber(enum.Enum): """Numerical value of severity. Smaller numerical values correspond to less severe events (such as debug events), larger numerical values correspond to more severe events (such as errors and critical events). See the `Log Data Model`_ spec for more info and how to map the severity from source format to OTLP Model. .. _Log Data Model: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber """ UNSPECIFIED = 0 TRACE = 1 TRACE2 = 2 TRACE3 = 3 TRACE4 = 4 DEBUG = 5 DEBUG2 = 6 DEBUG3 = 7 DEBUG4 = 8 INFO = 9 INFO2 = 10 INFO3 = 11 INFO4 = 12 WARN = 13 WARN2 = 14 WARN3 = 15 WARN4 = 16 ERROR = 17 ERROR2 = 18 ERROR3 = 19 ERROR4 = 20 FATAL = 21 FATAL2 = 22 FATAL3 = 23 FATAL4 = 24 python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/attributes/000077500000000000000000000000001511654350100273355ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/attributes/__init__.py000066400000000000000000000262501511654350100314530ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import threading from collections import OrderedDict from collections.abc import MutableMapping from typing import Mapping, Optional, Sequence, Tuple, Union from opentelemetry.util import types # bytes are accepted as a user supplied value for attributes but # decoded to strings internally. _VALID_ATTR_VALUE_TYPES = (bool, str, bytes, int, float) # AnyValue possible values _VALID_ANY_VALUE_TYPES = ( type(None), bool, bytes, int, float, str, Sequence, Mapping, ) _logger = logging.getLogger(__name__) def _clean_attribute( key: str, value: types.AttributeValue, max_len: Optional[int] ) -> Optional[Union[types.AttributeValue, Tuple[Union[str, int, float], ...]]]: """Checks if attribute value is valid and cleans it if required. The function returns the cleaned value or None if the value is not valid. An attribute value is valid if it is either: - A primitive type: string, boolean, double precision floating point (IEEE 754-1985) or integer. - An array of primitive type values. The array MUST be homogeneous, i.e. it MUST NOT contain values of different types. An attribute needs cleansing if: - Its length is greater than the maximum allowed length. - It needs to be encoded/decoded e.g, bytes to strings. """ if not (key and isinstance(key, str)): _logger.warning("invalid key `%s`. 
must be non-empty string.", key) return None if isinstance(value, _VALID_ATTR_VALUE_TYPES): return _clean_attribute_value(value, max_len) if isinstance(value, Sequence): sequence_first_valid_type = None cleaned_seq = [] for element in value: element = _clean_attribute_value(element, max_len) # type: ignore if element is None: cleaned_seq.append(element) continue element_type = type(element) # Reject attribute value if sequence contains a value with an incompatible type. if element_type not in _VALID_ATTR_VALUE_TYPES: _logger.warning( "Invalid type %s in attribute '%s' value sequence. Expected one of " "%s or None", element_type.__name__, key, [ valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES ], ) return None # The type of the sequence must be homogeneous. The first non-None # element determines the type of the sequence if sequence_first_valid_type is None: sequence_first_valid_type = element_type # use equality instead of isinstance as isinstance(True, int) evaluates to True elif element_type != sequence_first_valid_type: _logger.warning( "Attribute %r mixes types %s and %s in attribute value sequence", key, sequence_first_valid_type.__name__, type(element).__name__, ) return None cleaned_seq.append(element) # Freeze mutable sequences defensively return tuple(cleaned_seq) _logger.warning( "Invalid type %s for attribute '%s' value. Expected one of %s or a " "sequence of those types", type(value).__name__, key, [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES], ) return None def _clean_extended_attribute_value( # pylint: disable=too-many-branches value: types.AnyValue, max_len: Optional[int] ) -> types.AnyValue: # for primitive types just return the value and eventually shorten the string length if value is None or isinstance(value, _VALID_ATTR_VALUE_TYPES): if max_len is not None and isinstance(value, str): value = value[:max_len] return value if isinstance(value, Mapping): cleaned_dict: dict[str, types.AnyValue] = {} for key, element in value.items(): # skip invalid keys if not (key and isinstance(key, str)): _logger.warning( "invalid key `%s`. must be non-empty string.", key ) continue cleaned_dict[key] = _clean_extended_attribute( key=key, value=element, max_len=max_len ) return cleaned_dict if isinstance(value, Sequence): sequence_first_valid_type = None cleaned_seq: list[types.AnyValue] = [] for element in value: if element is None: cleaned_seq.append(element) continue if max_len is not None and isinstance(element, str): element = element[:max_len] element_type = type(element) if element_type not in _VALID_ATTR_VALUE_TYPES: element = _clean_extended_attribute_value( element, max_len=max_len ) element_type = type(element) # type: ignore # The type of the sequence must be homogeneous. The first non-None # element determines the type of the sequence if sequence_first_valid_type is None: sequence_first_valid_type = element_type # use equality instead of isinstance as isinstance(True, int) evaluates to True elif element_type != sequence_first_valid_type: _logger.warning( "Mixed types %s and %s in attribute value sequence", sequence_first_valid_type.__name__, type(element).__name__, ) return None cleaned_seq.append(element) # Freeze mutable sequences defensively return tuple(cleaned_seq) # Some applications such as Django add values to log records whose types fall outside the # primitive types and `_VALID_ANY_VALUE_TYPES`, i.e., they are not of type `AnyValue`. 
# Rather than attempt to whitelist every possible instrumentation, we stringify those values here # so they can still be represented as attributes, falling back to the original TypeError only if # converting to string raises. try: return str(value) except Exception: raise TypeError( f"Invalid type {type(value).__name__} for attribute value. " f"Expected one of {[valid_type.__name__ for valid_type in _VALID_ANY_VALUE_TYPES]} or a " "sequence of those types", ) def _clean_extended_attribute( key: str, value: types.AnyValue, max_len: Optional[int] ) -> types.AnyValue: """Checks if attribute value is valid and cleans it if required. The function returns the cleaned value or None if the value is not valid. An attribute value is valid if it is an AnyValue. An attribute needs cleansing if: - Its length is greater than the maximum allowed length. """ if not (key and isinstance(key, str)): _logger.warning("invalid key `%s`. must be non-empty string.", key) return None try: return _clean_extended_attribute_value(value, max_len=max_len) except TypeError as exception: _logger.warning("Attribute %s: %s", key, exception) return None def _clean_attribute_value( value: types.AttributeValue, limit: Optional[int] ) -> Optional[types.AttributeValue]: if value is None: return None if isinstance(value, bytes): try: value = value.decode() except UnicodeDecodeError: _logger.warning("Byte attribute could not be decoded.") return None if limit is not None and isinstance(value, str): value = value[:limit] return value class BoundedAttributes(MutableMapping): # type: ignore """An ordered dict with a fixed max capacity. Oldest elements are dropped when the dict is full and a new element is added. """ def __init__( self, maxlen: Optional[int] = None, attributes: Optional[types._ExtendedAttributes] = None, immutable: bool = True, max_value_len: Optional[int] = None, extended_attributes: bool = False, ): if maxlen is not None: if not isinstance(maxlen, int) or maxlen < 0: raise ValueError( "maxlen must be valid int greater or equal to 0" ) self.maxlen = maxlen self.dropped = 0 self.max_value_len = max_value_len self._extended_attributes = extended_attributes # OrderedDict is not used until the maxlen is reached for efficiency. 
self._dict: Union[ MutableMapping[str, types.AnyValue], OrderedDict[str, types.AnyValue], ] = {} self._lock = threading.RLock() if attributes: for key, value in attributes.items(): self[key] = value self._immutable = immutable def __repr__(self) -> str: return f"{dict(self._dict)}" def __getitem__(self, key: str) -> types.AnyValue: return self._dict[key] def __setitem__(self, key: str, value: types.AnyValue) -> None: if getattr(self, "_immutable", False): # type: ignore raise TypeError with self._lock: if self.maxlen is not None and self.maxlen == 0: self.dropped += 1 return if self._extended_attributes: value = _clean_extended_attribute( key, value, self.max_value_len ) else: value = _clean_attribute(key, value, self.max_value_len) # type: ignore if value is None: return if key in self._dict: del self._dict[key] elif self.maxlen is not None and len(self._dict) == self.maxlen: if not isinstance(self._dict, OrderedDict): self._dict = OrderedDict(self._dict) self._dict.popitem(last=False) # type: ignore self.dropped += 1 self._dict[key] = value # type: ignore def __delitem__(self, key: str) -> None: if getattr(self, "_immutable", False): # type: ignore raise TypeError with self._lock: del self._dict[key] def __iter__(self): # type: ignore with self._lock: return iter(self._dict.copy()) # type: ignore def __len__(self) -> int: return len(self._dict) def copy(self): # type: ignore return self._dict.copy() # type: ignore python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/attributes/py.typed000066400000000000000000000000001511654350100310220ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/baggage/000077500000000000000000000000001511654350100265245ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/baggage/__init__.py000066400000000000000000000076731511654350100306520ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from logging import getLogger from re import compile from types import MappingProxyType from typing import Dict, Mapping, Optional from opentelemetry.context import create_key, get_value, set_value from opentelemetry.context.context import Context from opentelemetry.util.re import ( _BAGGAGE_PROPERTY_FORMAT, _KEY_FORMAT, _VALUE_FORMAT, ) _BAGGAGE_KEY = create_key("baggage") _logger = getLogger(__name__) _KEY_PATTERN = compile(_KEY_FORMAT) _VALUE_PATTERN = compile(_VALUE_FORMAT) _PROPERT_PATTERN = compile(_BAGGAGE_PROPERTY_FORMAT) def get_all( context: Optional[Context] = None, ) -> Mapping[str, object]: """Returns the name/value pairs in the Baggage Args: context: The Context to use. 
If not set, uses current Context Returns: The name/value pairs in the Baggage """ return MappingProxyType(_get_baggage_value(context=context)) def get_baggage( name: str, context: Optional[Context] = None ) -> Optional[object]: """Provides access to the value for a name/value pair in the Baggage Args: name: The name of the value to retrieve context: The Context to use. If not set, uses current Context Returns: The value associated with the given name, or null if the given name is not present. """ return _get_baggage_value(context=context).get(name) def set_baggage( name: str, value: object, context: Optional[Context] = None ) -> Context: """Sets a value in the Baggage Args: name: The name of the value to set value: The value to set context: The Context to use. If not set, uses current Context Returns: A Context with the value updated """ baggage = _get_baggage_value(context=context).copy() baggage[name] = value return set_value(_BAGGAGE_KEY, baggage, context=context) def remove_baggage(name: str, context: Optional[Context] = None) -> Context: """Removes a value from the Baggage Args: name: The name of the value to remove context: The Context to use. If not set, uses current Context Returns: A Context with the name/value removed """ baggage = _get_baggage_value(context=context).copy() baggage.pop(name, None) return set_value(_BAGGAGE_KEY, baggage, context=context) def clear(context: Optional[Context] = None) -> Context: """Removes all values from the Baggage Args: context: The Context to use. If not set, uses current Context Returns: A Context with all baggage entries removed """ return set_value(_BAGGAGE_KEY, {}, context=context) def _get_baggage_value(context: Optional[Context] = None) -> Dict[str, object]: baggage = get_value(_BAGGAGE_KEY, context=context) if isinstance(baggage, dict): return baggage return {} def _is_valid_key(name: str) -> bool: return _KEY_PATTERN.fullmatch(str(name)) is not None def _is_valid_value(value: object) -> bool: parts = str(value).split(";") is_valid_value = _VALUE_PATTERN.fullmatch(parts[0]) is not None if len(parts) > 1: # one or more properties metadata for property in parts[1:]: if _PROPERT_PATTERN.fullmatch(property) is None: is_valid_value = False break return is_valid_value def _is_valid_pair(key: str, value: str) -> bool: return _is_valid_key(key) and _is_valid_value(value) python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/baggage/propagation/000077500000000000000000000000001511654350100310475ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py000066400000000000000000000111171511654350100331610ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
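# Illustrative sketch of the baggage API defined in __init__.py above (the
# key and value are placeholders): the setters return a new Context rather
# than mutating the current one, so the returned Context must be made current
# (e.g. with opentelemetry.context.attach) or passed explicitly to
# subsequent calls.
#
#     from opentelemetry.baggage import get_baggage, remove_baggage, set_baggage
#
#     ctx = set_baggage("user.id", "42")
#     assert get_baggage("user.id", context=ctx) == "42"
#     ctx = remove_baggage("user.id", context=ctx)
#     assert get_baggage("user.id", context=ctx) is None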
# from logging import getLogger from re import split from typing import Iterable, List, Mapping, Optional, Set from urllib.parse import quote_plus, unquote_plus from opentelemetry.baggage import _is_valid_pair, get_all, set_baggage from opentelemetry.context import get_current from opentelemetry.context.context import Context from opentelemetry.propagators import textmap from opentelemetry.util.re import _DELIMITER_PATTERN _logger = getLogger(__name__) class W3CBaggagePropagator(textmap.TextMapPropagator): """Extracts and injects Baggage which is used to annotate telemetry.""" _MAX_HEADER_LENGTH = 8192 _MAX_PAIR_LENGTH = 4096 _MAX_PAIRS = 180 _BAGGAGE_HEADER_NAME = "baggage" def extract( self, carrier: textmap.CarrierT, context: Optional[Context] = None, getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter, ) -> Context: """Extract Baggage from the carrier. See `opentelemetry.propagators.textmap.TextMapPropagator.extract` """ if context is None: context = get_current() header = _extract_first_element( getter.get(carrier, self._BAGGAGE_HEADER_NAME) ) if not header: return context if len(header) > self._MAX_HEADER_LENGTH: _logger.warning( "Baggage header `%s` exceeded the maximum number of bytes per baggage-string", header, ) return context baggage_entries: List[str] = split(_DELIMITER_PATTERN, header) total_baggage_entries = self._MAX_PAIRS if len(baggage_entries) > self._MAX_PAIRS: _logger.warning( "Baggage header `%s` exceeded the maximum number of list-members", header, ) for entry in baggage_entries: if len(entry) > self._MAX_PAIR_LENGTH: _logger.warning( "Baggage entry `%s` exceeded the maximum number of bytes per list-member", entry, ) continue if not entry: # empty string continue try: name, value = entry.split("=", 1) except Exception: # pylint: disable=broad-exception-caught _logger.warning( "Baggage list-member `%s` doesn't match the format", entry ) continue if not _is_valid_pair(name, value): _logger.warning("Invalid baggage entry: `%s`", entry) continue name = unquote_plus(name).strip() value = unquote_plus(value).strip() context = set_baggage( name, value, context=context, ) total_baggage_entries -= 1 if total_baggage_entries == 0: break return context def inject( self, carrier: textmap.CarrierT, context: Optional[Context] = None, setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter, ) -> None: """Injects Baggage into the carrier. 
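        For example, to attach the current Baggage to an outgoing request's
        headers (an illustrative sketch; any mutable mapping works as the
        carrier with the default setter)::

            headers = {}
            W3CBaggagePropagator().inject(headers)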
See `opentelemetry.propagators.textmap.TextMapPropagator.inject` """ baggage_entries = get_all(context=context) if not baggage_entries: return baggage_string = _format_baggage(baggage_entries) setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string) @property def fields(self) -> Set[str]: """Returns a set with the fields set in `inject`.""" return {self._BAGGAGE_HEADER_NAME} def _format_baggage(baggage_entries: Mapping[str, object]) -> str: return ",".join( quote_plus(str(key)) + "=" + quote_plus(str(value)) for key, value in baggage_entries.items() ) def _extract_first_element( items: Optional[Iterable[textmap.CarrierT]], ) -> Optional[textmap.CarrierT]: if items is None: return None return next(iter(items), None) python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/baggage/py.typed000066400000000000000000000000001511654350100302110ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/context/000077500000000000000000000000001511654350100266335ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/context/__init__.py000066400000000000000000000127141511654350100307510ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import logging import typing from contextvars import Token from os import environ from uuid import uuid4 # pylint: disable=wrong-import-position from opentelemetry.context.context import Context, _RuntimeContext # noqa from opentelemetry.environment_variables import OTEL_PYTHON_CONTEXT from opentelemetry.util._importlib_metadata import entry_points logger = logging.getLogger(__name__) def _load_runtime_context() -> _RuntimeContext: """Initialize the RuntimeContext Returns: An instance of RuntimeContext. """ # FIXME use a better implementation of a configuration manager # to avoid having to get configuration values straight from # environment variables default_context = "contextvars_context" configured_context = environ.get(OTEL_PYTHON_CONTEXT, default_context) # type: str try: return next( # type: ignore iter( # type: ignore entry_points( # type: ignore group="opentelemetry_context", name=configured_context, ) ) ).load()() except Exception: # pylint: disable=broad-exception-caught logger.exception( "Failed to load context: %s, fallback to %s", configured_context, default_context, ) return next( # type: ignore iter( # type: ignore entry_points( # type: ignore group="opentelemetry_context", name=default_context, ) ) ).load()() _RUNTIME_CONTEXT = _load_runtime_context() def create_key(keyname: str) -> str: """To allow cross-cutting concern to control access to their local state, the RuntimeContext API provides a function which takes a keyname as input, and returns a unique key. Args: keyname: The key name is for debugging purposes and is not required to be unique. Returns: A unique string representing the newly created key. 
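    Example (illustrative)::

        _MY_CONCERN_KEY = create_key("my-concern")

    The returned key is then passed to `get_value` and `set_value` to read
    and write that concern's state.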
""" return keyname + "-" + str(uuid4()) def get_value(key: str, context: typing.Optional[Context] = None) -> "object": """To access the local state of a concern, the RuntimeContext API provides a function which takes a context and a key as input, and returns a value. Args: key: The key of the value to retrieve. context: The context from which to retrieve the value, if None, the current context is used. Returns: The value associated with the key. """ return context.get(key) if context is not None else get_current().get(key) def set_value( key: str, value: "object", context: typing.Optional[Context] = None ) -> Context: """To record the local state of a cross-cutting concern, the RuntimeContext API provides a function which takes a context, a key, and a value as input, and returns an updated context which contains the new value. Args: key: The key of the entry to set. value: The value of the entry to set. context: The context to copy, if None, the current context is used. Returns: A new `Context` containing the value set. """ if context is None: context = get_current() new_values = context.copy() new_values[key] = value return Context(new_values) def get_current() -> Context: """To access the context associated with program execution, the Context API provides a function which takes no arguments and returns a Context. Returns: The current `Context` object. """ return _RUNTIME_CONTEXT.get_current() def attach(context: Context) -> Token[Context]: """Associates a Context with the caller's current execution unit. Returns a token that can be used to restore the previous Context. Args: context: The Context to set as current. Returns: A token that can be used with `detach` to reset the context. """ return _RUNTIME_CONTEXT.attach(context) def detach(token: Token[Context]) -> None: """Resets the Context associated with the caller's current execution unit to the value it had before attaching a specified Context. Args: token: The Token that was returned by a previous call to attach a Context. """ try: _RUNTIME_CONTEXT.detach(token) except Exception: # pylint: disable=broad-exception-caught logger.exception("Failed to detach context") # FIXME This is a temporary location for the suppress instrumentation key. # Once the decision around how to suppress instrumentation is made in the # spec, this key should be moved accordingly. _SUPPRESS_INSTRUMENTATION_KEY = create_key("suppress_instrumentation") _SUPPRESS_HTTP_INSTRUMENTATION_KEY = create_key( "suppress_http_instrumentation" ) __all__ = [ "Context", "attach", "create_key", "detach", "get_current", "get_value", "set_value", ] python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/context/context.py000066400000000000000000000032621511654350100306740ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations import typing from abc import ABC, abstractmethod from contextvars import Token class Context(typing.Dict[str, object]): def __setitem__(self, key: str, value: object) -> None: raise ValueError class _RuntimeContext(ABC): """The RuntimeContext interface provides a wrapper for the different mechanisms that are used to propagate context in Python. Implementations can be made available via entry_points and selected through environment variables. """ @abstractmethod def attach(self, context: Context) -> Token[Context]: """Sets the current `Context` object. Returns a token that can be used to reset to the previous `Context`. Args: context: The Context to set. """ @abstractmethod def get_current(self) -> Context: """Returns the current `Context` object.""" @abstractmethod def detach(self, token: Token[Context]) -> None: """Resets Context to a previous value Args: token: A reference to a previous Context. """ __all__ = ["Context"] python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/context/contextvars_context.py000066400000000000000000000034451511654350100333370ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from contextvars import ContextVar, Token from opentelemetry.context.context import Context, _RuntimeContext class ContextVarsRuntimeContext(_RuntimeContext): """An implementation of the RuntimeContext interface which wraps ContextVar under the hood. This is the preferred implementation for usage with Python 3.5+ """ _CONTEXT_KEY = "current_context" def __init__(self) -> None: self._current_context = ContextVar( self._CONTEXT_KEY, default=Context() ) def attach(self, context: Context) -> Token[Context]: """Sets the current `Context` object. Returns a token that can be used to reset to the previous `Context`. Args: context: The Context to set. """ return self._current_context.set(context) def get_current(self) -> Context: """Returns the current `Context` object.""" return self._current_context.get() def detach(self, token: Token[Context]) -> None: """Resets Context to a previous value Args: token: A reference to a previous Context. """ self._current_context.reset(token) __all__ = ["ContextVarsRuntimeContext"] python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/context/py.typed000066400000000000000000000000001511654350100303200ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/environment_variables/000077500000000000000000000000001511654350100315435ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py000066400000000000000000000046771511654350100336720ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. OTEL_LOGS_EXPORTER = "OTEL_LOGS_EXPORTER" """ .. envvar:: OTEL_LOGS_EXPORTER """ OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER" """ .. envvar:: OTEL_METRICS_EXPORTER Specifies which exporter is used for metrics. See `General SDK Configuration `_. **Default value:** ``"otlp"`` **Example:** ``export OTEL_METRICS_EXPORTER="prometheus"`` Accepted values for ``OTEL_METRICS_EXPORTER`` are: - ``"otlp"`` - ``"prometheus"`` - ``"none"``: No automatically configured exporter for metrics. .. note:: Exporter packages may add entry points for group ``opentelemetry_metrics_exporter`` which can then be used with this environment variable by name. The entry point should point to either a `opentelemetry.sdk.metrics.export.MetricExporter` (push exporter) or `opentelemetry.sdk.metrics.export.MetricReader` (pull exporter) subclass; it must be constructable without any required arguments. This mechanism is considered experimental and may change in subsequent releases. """ OTEL_PROPAGATORS = "OTEL_PROPAGATORS" """ .. envvar:: OTEL_PROPAGATORS """ OTEL_PYTHON_CONTEXT = "OTEL_PYTHON_CONTEXT" """ .. envvar:: OTEL_PYTHON_CONTEXT """ OTEL_PYTHON_ID_GENERATOR = "OTEL_PYTHON_ID_GENERATOR" """ .. envvar:: OTEL_PYTHON_ID_GENERATOR """ OTEL_TRACES_EXPORTER = "OTEL_TRACES_EXPORTER" """ .. envvar:: OTEL_TRACES_EXPORTER """ OTEL_PYTHON_TRACER_PROVIDER = "OTEL_PYTHON_TRACER_PROVIDER" """ .. envvar:: OTEL_PYTHON_TRACER_PROVIDER """ OTEL_PYTHON_METER_PROVIDER = "OTEL_PYTHON_METER_PROVIDER" """ .. envvar:: OTEL_PYTHON_METER_PROVIDER """ _OTEL_PYTHON_LOGGER_PROVIDER = "OTEL_PYTHON_LOGGER_PROVIDER" """ .. envvar:: OTEL_PYTHON_LOGGER_PROVIDER """ _OTEL_PYTHON_EVENT_LOGGER_PROVIDER = "OTEL_PYTHON_EVENT_LOGGER_PROVIDER" """ .. envvar:: OTEL_PYTHON_EVENT_LOGGER_PROVIDER """ python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/environment_variables/py.typed000066400000000000000000000000001511654350100332300ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/metrics/000077500000000000000000000000001511654350100266155ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/metrics/__init__.py000066400000000000000000000067701511654350100307400ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The OpenTelemetry metrics API describes the classes used to generate metrics. The :class:`.MeterProvider` provides users access to the :class:`.Meter` which in turn is used to create :class:`.Instrument` objects. The :class:`.Instrument` objects are used to record measurements. 
This module provides abstract (i.e. unimplemented) classes required for metrics, and a concrete no-op implementation :class:`.NoOpMeter` that allows applications to use the API package alone without a supporting implementation. To get a meter, you need to provide the package name from which you are calling the meter APIs to OpenTelemetry by calling `MeterProvider.get_meter` with the calling instrumentation name and the version of your package. The following code shows how to obtain a meter using the global :class:`.MeterProvider`:: from opentelemetry.metrics import get_meter meter = get_meter("example-meter") counter = meter.create_counter("example-counter") .. versionadded:: 1.10.0 .. versionchanged:: 1.12.0rc """ from opentelemetry.metrics._internal import ( Meter, MeterProvider, NoOpMeter, NoOpMeterProvider, get_meter, get_meter_provider, set_meter_provider, ) from opentelemetry.metrics._internal.instrument import ( Asynchronous, CallbackOptions, CallbackT, Counter, Histogram, Instrument, NoOpCounter, NoOpHistogram, NoOpObservableCounter, NoOpObservableGauge, NoOpObservableUpDownCounter, NoOpUpDownCounter, ObservableCounter, ObservableGauge, ObservableUpDownCounter, Synchronous, UpDownCounter, ) from opentelemetry.metrics._internal.instrument import Gauge as _Gauge from opentelemetry.metrics._internal.instrument import NoOpGauge as _NoOpGauge from opentelemetry.metrics._internal.observation import Observation for obj in [ Counter, Synchronous, Asynchronous, CallbackOptions, _Gauge, _NoOpGauge, get_meter_provider, get_meter, Histogram, Meter, MeterProvider, Instrument, NoOpCounter, NoOpHistogram, NoOpMeter, NoOpMeterProvider, NoOpObservableCounter, NoOpObservableGauge, NoOpObservableUpDownCounter, NoOpUpDownCounter, ObservableCounter, ObservableGauge, ObservableUpDownCounter, Observation, set_meter_provider, UpDownCounter, ]: obj.__module__ = __name__ __all__ = [ "CallbackOptions", "MeterProvider", "NoOpMeterProvider", "Meter", "Counter", "_Gauge", "_NoOpGauge", "NoOpCounter", "UpDownCounter", "NoOpUpDownCounter", "Histogram", "NoOpHistogram", "ObservableCounter", "NoOpObservableCounter", "ObservableUpDownCounter", "Instrument", "Synchronous", "Asynchronous", "NoOpObservableGauge", "ObservableGauge", "NoOpObservableUpDownCounter", "get_meter", "get_meter_provider", "set_meter_provider", "Observation", "CallbackT", "NoOpMeter", ] python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/metrics/_internal/000077500000000000000000000000001511654350100305705ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/metrics/_internal/__init__.py000066400000000000000000000740031511654350100327050ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=too-many-ancestors """ The OpenTelemetry metrics API describes the classes used to generate metrics. The :class:`.MeterProvider` provides users access to the :class:`.Meter` which in turn is used to create :class:`.Instrument` objects. 
The :class:`.Instrument` objects are used to record measurements. This module provides abstract (i.e. unimplemented) classes required for metrics, and a concrete no-op implementation :class:`.NoOpMeter` that allows applications to use the API package alone without a supporting implementation. To get a meter, you need to provide the package name from which you are calling the meter APIs to OpenTelemetry by calling `MeterProvider.get_meter` with the calling instrumentation name and the version of your package. The following code shows how to obtain a meter using the global :class:`.MeterProvider`:: from opentelemetry.metrics import get_meter meter = get_meter("example-meter") counter = meter.create_counter("example-counter") .. versionadded:: 1.10.0 """ import warnings from abc import ABC, abstractmethod from dataclasses import dataclass from logging import getLogger from os import environ from threading import Lock from typing import Dict, List, Optional, Sequence, Union, cast from opentelemetry.environment_variables import OTEL_PYTHON_METER_PROVIDER from opentelemetry.metrics._internal.instrument import ( CallbackT, Counter, Gauge, Histogram, NoOpCounter, NoOpGauge, NoOpHistogram, NoOpObservableCounter, NoOpObservableGauge, NoOpObservableUpDownCounter, NoOpUpDownCounter, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, _MetricsHistogramAdvisory, _ProxyCounter, _ProxyGauge, _ProxyHistogram, _ProxyObservableCounter, _ProxyObservableGauge, _ProxyObservableUpDownCounter, _ProxyUpDownCounter, ) from opentelemetry.util._once import Once from opentelemetry.util._providers import _load_provider from opentelemetry.util.types import ( Attributes, ) _logger = getLogger(__name__) # pylint: disable=invalid-name _ProxyInstrumentT = Union[ _ProxyCounter, _ProxyHistogram, _ProxyGauge, _ProxyObservableCounter, _ProxyObservableGauge, _ProxyObservableUpDownCounter, _ProxyUpDownCounter, ] class MeterProvider(ABC): """ MeterProvider is the entry point of the API. It provides access to `Meter` instances. """ @abstractmethod def get_meter( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[Attributes] = None, ) -> "Meter": """Returns a `Meter` for use by the given instrumentation library. For any two calls it is undefined whether the same or different `Meter` instances are returned, even for different library names. This function may return different `Meter` types (e.g. a no-op meter vs. a functional meter). Args: name: The name of the instrumenting module. ``__name__`` may not be used as this can result in different meter names if the meters are in different files. It is better to use a fixed string that can be imported where needed and used consistently as the name of the meter. This should *not* be the name of the module that is instrumented but the name of the module doing the instrumentation. E.g., instead of ``"requests"``, use ``"opentelemetry.instrumentation.requests"``. version: Optional. The version string of the instrumenting library. Usually this should be the same as ``importlib.metadata.version(instrumenting_library_name)``. schema_url: Optional. Specifies the Schema URL of the emitted telemetry. attributes: Optional. Attributes that are associated with the emitted telemetry. 
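        A minimal usage sketch (illustrative; the instrumentation name and
        version are placeholders)::

            from opentelemetry.metrics import get_meter_provider

            meter = get_meter_provider().get_meter(
                "opentelemetry.instrumentation.example", version="0.1.0"
            )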
""" class NoOpMeterProvider(MeterProvider): """The default MeterProvider used when no MeterProvider implementation is available.""" def get_meter( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[Attributes] = None, ) -> "Meter": """Returns a NoOpMeter.""" return NoOpMeter(name, version=version, schema_url=schema_url) class _ProxyMeterProvider(MeterProvider): def __init__(self) -> None: self._lock = Lock() self._meters: List[_ProxyMeter] = [] self._real_meter_provider: Optional[MeterProvider] = None def get_meter( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[Attributes] = None, ) -> "Meter": with self._lock: if self._real_meter_provider is not None: return self._real_meter_provider.get_meter( name, version, schema_url ) meter = _ProxyMeter(name, version=version, schema_url=schema_url) self._meters.append(meter) return meter def on_set_meter_provider(self, meter_provider: MeterProvider) -> None: with self._lock: self._real_meter_provider = meter_provider for meter in self._meters: meter.on_set_meter_provider(meter_provider) @dataclass class _InstrumentRegistrationStatus: instrument_id: str already_registered: bool conflict: bool current_advisory: Optional[_MetricsHistogramAdvisory] class Meter(ABC): """Handles instrument creation. This class provides methods for creating instruments which are then used to produce measurements. """ def __init__( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, ) -> None: super().__init__() self._name = name self._version = version self._schema_url = schema_url self._instrument_ids: Dict[ str, Optional[_MetricsHistogramAdvisory] ] = {} self._instrument_ids_lock = Lock() @property def name(self) -> str: """ The name of the instrumenting module. """ return self._name @property def version(self) -> Optional[str]: """ The version string of the instrumenting library. """ return self._version @property def schema_url(self) -> Optional[str]: """ Specifies the Schema URL of the emitted telemetry """ return self._schema_url def _register_instrument( self, name: str, type_: type, unit: str, description: str, advisory: Optional[_MetricsHistogramAdvisory] = None, ) -> _InstrumentRegistrationStatus: """ Register an instrument with the name, type, unit and description as identifying keys and the advisory as value. Returns a tuple. The first value is the instrument id. The second value is an `_InstrumentRegistrationStatus` where `already_registered` is `True` if the instrument has been registered already. If `conflict` is set to True the `current_advisory` attribute contains the registered instrument advisory. 
""" instrument_id = ",".join( [name.strip().lower(), type_.__name__, unit, description] ) already_registered = False conflict = False current_advisory = None with self._instrument_ids_lock: # we are not using get because None is a valid value already_registered = instrument_id in self._instrument_ids if already_registered: current_advisory = self._instrument_ids[instrument_id] conflict = current_advisory != advisory else: self._instrument_ids[instrument_id] = advisory return _InstrumentRegistrationStatus( instrument_id=instrument_id, already_registered=already_registered, conflict=conflict, current_advisory=current_advisory, ) @staticmethod def _log_instrument_registration_conflict( name: str, instrumentation_type: str, unit: str, description: str, status: _InstrumentRegistrationStatus, ) -> None: _logger.warning( "An instrument with name %s, type %s, unit %s and " "description %s has been created already with a " "different advisory value %s and will be used instead.", name, instrumentation_type, unit, description, status.current_advisory, ) @abstractmethod def create_counter( self, name: str, unit: str = "", description: str = "", ) -> Counter: """Creates a `Counter` instrument Args: name: The name of the instrument to be created unit: The unit for observations this instrument reports. For example, ``By`` for bytes. UCUM units are recommended. description: A description for this instrument and what it measures. """ @abstractmethod def create_up_down_counter( self, name: str, unit: str = "", description: str = "", ) -> UpDownCounter: """Creates an `UpDownCounter` instrument Args: name: The name of the instrument to be created unit: The unit for observations this instrument reports. For example, ``By`` for bytes. UCUM units are recommended. description: A description for this instrument and what it measures. """ @abstractmethod def create_observable_counter( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> ObservableCounter: """Creates an `ObservableCounter` instrument An observable counter observes a monotonically increasing count by calling provided callbacks which accept a :class:`~opentelemetry.metrics.CallbackOptions` and return multiple :class:`~opentelemetry.metrics.Observation`. For example, an observable counter could be used to report system CPU time periodically. Here is a basic implementation:: def cpu_time_callback(options: CallbackOptions) -> Iterable[Observation]: observations = [] with open("/proc/stat") as procstat: procstat.readline() # skip the first line for line in procstat: if not line.startswith("cpu"): break cpu, *states = line.split() observations.append(Observation(int(states[0]) // 100, {"cpu": cpu, "state": "user"})) observations.append(Observation(int(states[1]) // 100, {"cpu": cpu, "state": "nice"})) observations.append(Observation(int(states[2]) // 100, {"cpu": cpu, "state": "system"})) # ... 
other states return observations meter.create_observable_counter( "system.cpu.time", callbacks=[cpu_time_callback], unit="s", description="CPU time" ) To reduce memory usage, you can use generator callbacks instead of building the full list:: def cpu_time_callback(options: CallbackOptions) -> Iterable[Observation]: with open("/proc/stat") as procstat: procstat.readline() # skip the first line for line in procstat: if not line.startswith("cpu"): break cpu, *states = line.split() yield Observation(int(states[0]) // 100, {"cpu": cpu, "state": "user"}) yield Observation(int(states[1]) // 100, {"cpu": cpu, "state": "nice"}) # ... other states Alternatively, you can pass a sequence of generators directly instead of a sequence of callbacks, which each should return iterables of :class:`~opentelemetry.metrics.Observation`:: def cpu_time_callback(states_to_include: set[str]) -> Iterable[Iterable[Observation]]: # accept options sent in from OpenTelemetry options = yield while True: observations = [] with open("/proc/stat") as procstat: procstat.readline() # skip the first line for line in procstat: if not line.startswith("cpu"): break cpu, *states = line.split() if "user" in states_to_include: observations.append(Observation(int(states[0]) // 100, {"cpu": cpu, "state": "user"})) if "nice" in states_to_include: observations.append(Observation(int(states[1]) // 100, {"cpu": cpu, "state": "nice"})) # ... other states # yield the observations and receive the options for next iteration options = yield observations meter.create_observable_counter( "system.cpu.time", callbacks=[cpu_time_callback({"user", "system"})], unit="s", description="CPU time" ) The :class:`~opentelemetry.metrics.CallbackOptions` contain a timeout which the callback should respect. For example if the callback does asynchronous work, like making HTTP requests, it should respect the timeout:: def scrape_http_callback(options: CallbackOptions) -> Iterable[Observation]: r = requests.get('http://scrapethis.com', timeout=options.timeout_millis / 10**3) for value in r.json(): yield Observation(value) Args: name: The name of the instrument to be created callbacks: A sequence of callbacks that return an iterable of :class:`~opentelemetry.metrics.Observation`. Alternatively, can be a sequence of generators that each yields iterables of :class:`~opentelemetry.metrics.Observation`. unit: The unit for observations this instrument reports. For example, ``By`` for bytes. UCUM units are recommended. description: A description for this instrument and what it measures. """ @abstractmethod def create_histogram( self, name: str, unit: str = "", description: str = "", *, explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, ) -> Histogram: """Creates a :class:`~opentelemetry.metrics.Histogram` instrument Args: name: The name of the instrument to be created unit: The unit for observations this instrument reports. For example, ``By`` for bytes. UCUM units are recommended. description: A description for this instrument and what it measures. """ def create_gauge( # type: ignore # pylint: disable=no-self-use self, name: str, unit: str = "", description: str = "", ) -> Gauge: # pyright: ignore[reportReturnType] """Creates a ``Gauge`` instrument Args: name: The name of the instrument to be created unit: The unit for observations this instrument reports. For example, ``By`` for bytes. UCUM units are recommended. description: A description for this instrument and what it measures. 
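
        Example (illustrative; only meaningful with an SDK ``MeterProvider``
        installed, since this base implementation is a no-op that warns)::

            queue_size = meter.create_gauge("queue.size", unit="{item}")
            queue_size.set(10, {"queue.name": "default"})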
""" warnings.warn("create_gauge() is not implemented and will be a no-op") @abstractmethod def create_observable_gauge( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> ObservableGauge: """Creates an `ObservableGauge` instrument Args: name: The name of the instrument to be created callbacks: A sequence of callbacks that return an iterable of :class:`~opentelemetry.metrics.Observation`. Alternatively, can be a generator that yields iterables of :class:`~opentelemetry.metrics.Observation`. unit: The unit for observations this instrument reports. For example, ``By`` for bytes. UCUM units are recommended. description: A description for this instrument and what it measures. """ @abstractmethod def create_observable_up_down_counter( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> ObservableUpDownCounter: """Creates an `ObservableUpDownCounter` instrument Args: name: The name of the instrument to be created callbacks: A sequence of callbacks that return an iterable of :class:`~opentelemetry.metrics.Observation`. Alternatively, can be a generator that yields iterables of :class:`~opentelemetry.metrics.Observation`. unit: The unit for observations this instrument reports. For example, ``By`` for bytes. UCUM units are recommended. description: A description for this instrument and what it measures. """ class _ProxyMeter(Meter): def __init__( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, ) -> None: super().__init__(name, version=version, schema_url=schema_url) self._lock = Lock() self._instruments: List[_ProxyInstrumentT] = [] self._real_meter: Optional[Meter] = None def on_set_meter_provider(self, meter_provider: MeterProvider) -> None: """Called when a real meter provider is set on the creating _ProxyMeterProvider Creates a real backing meter for this instance and notifies all created instruments so they can create real backing instruments. 
""" real_meter = meter_provider.get_meter( self._name, self._version, self._schema_url ) with self._lock: self._real_meter = real_meter # notify all proxy instruments of the new meter so they can create # real instruments to back themselves for instrument in self._instruments: instrument.on_meter_set(real_meter) def create_counter( self, name: str, unit: str = "", description: str = "", ) -> Counter: with self._lock: if self._real_meter: return self._real_meter.create_counter(name, unit, description) proxy = _ProxyCounter(name, unit, description) self._instruments.append(proxy) return proxy def create_up_down_counter( self, name: str, unit: str = "", description: str = "", ) -> UpDownCounter: with self._lock: if self._real_meter: return self._real_meter.create_up_down_counter( name, unit, description ) proxy = _ProxyUpDownCounter(name, unit, description) self._instruments.append(proxy) return proxy def create_observable_counter( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> ObservableCounter: with self._lock: if self._real_meter: return self._real_meter.create_observable_counter( name, callbacks, unit, description ) proxy = _ProxyObservableCounter( name, callbacks, unit=unit, description=description ) self._instruments.append(proxy) return proxy def create_histogram( self, name: str, unit: str = "", description: str = "", *, explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, ) -> Histogram: with self._lock: if self._real_meter: return self._real_meter.create_histogram( name, unit, description, explicit_bucket_boundaries_advisory=explicit_bucket_boundaries_advisory, ) proxy = _ProxyHistogram( name, unit, description, explicit_bucket_boundaries_advisory ) self._instruments.append(proxy) return proxy def create_gauge( self, name: str, unit: str = "", description: str = "", ) -> Gauge: with self._lock: if self._real_meter: return self._real_meter.create_gauge(name, unit, description) proxy = _ProxyGauge(name, unit, description) self._instruments.append(proxy) return proxy def create_observable_gauge( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> ObservableGauge: with self._lock: if self._real_meter: return self._real_meter.create_observable_gauge( name, callbacks, unit, description ) proxy = _ProxyObservableGauge( name, callbacks, unit=unit, description=description ) self._instruments.append(proxy) return proxy def create_observable_up_down_counter( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> ObservableUpDownCounter: with self._lock: if self._real_meter: return self._real_meter.create_observable_up_down_counter( name, callbacks, unit, description, ) proxy = _ProxyObservableUpDownCounter( name, callbacks, unit=unit, description=description ) self._instruments.append(proxy) return proxy class NoOpMeter(Meter): """The default Meter used when no Meter implementation is available. All operations are no-op. 
""" def create_counter( self, name: str, unit: str = "", description: str = "", ) -> Counter: """Returns a no-op Counter.""" status = self._register_instrument( name, NoOpCounter, unit, description ) if status.conflict: self._log_instrument_registration_conflict( name, Counter.__name__, unit, description, status, ) return NoOpCounter(name, unit=unit, description=description) def create_gauge( self, name: str, unit: str = "", description: str = "", ) -> Gauge: """Returns a no-op Gauge.""" status = self._register_instrument(name, NoOpGauge, unit, description) if status.conflict: self._log_instrument_registration_conflict( name, Gauge.__name__, unit, description, status, ) return NoOpGauge(name, unit=unit, description=description) def create_up_down_counter( self, name: str, unit: str = "", description: str = "", ) -> UpDownCounter: """Returns a no-op UpDownCounter.""" status = self._register_instrument( name, NoOpUpDownCounter, unit, description ) if status.conflict: self._log_instrument_registration_conflict( name, UpDownCounter.__name__, unit, description, status, ) return NoOpUpDownCounter(name, unit=unit, description=description) def create_observable_counter( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> ObservableCounter: """Returns a no-op ObservableCounter.""" status = self._register_instrument( name, NoOpObservableCounter, unit, description ) if status.conflict: self._log_instrument_registration_conflict( name, ObservableCounter.__name__, unit, description, status, ) return NoOpObservableCounter( name, callbacks, unit=unit, description=description, ) def create_histogram( self, name: str, unit: str = "", description: str = "", *, explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, ) -> Histogram: """Returns a no-op Histogram.""" status = self._register_instrument( name, NoOpHistogram, unit, description, _MetricsHistogramAdvisory( explicit_bucket_boundaries=explicit_bucket_boundaries_advisory ), ) if status.conflict: self._log_instrument_registration_conflict( name, Histogram.__name__, unit, description, status, ) return NoOpHistogram( name, unit=unit, description=description, explicit_bucket_boundaries_advisory=explicit_bucket_boundaries_advisory, ) def create_observable_gauge( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> ObservableGauge: """Returns a no-op ObservableGauge.""" status = self._register_instrument( name, NoOpObservableGauge, unit, description ) if status.conflict: self._log_instrument_registration_conflict( name, ObservableGauge.__name__, unit, description, status, ) return NoOpObservableGauge( name, callbacks, unit=unit, description=description, ) def create_observable_up_down_counter( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> ObservableUpDownCounter: """Returns a no-op ObservableUpDownCounter.""" status = self._register_instrument( name, NoOpObservableUpDownCounter, unit, description ) if status.conflict: self._log_instrument_registration_conflict( name, ObservableUpDownCounter.__name__, unit, description, status, ) return NoOpObservableUpDownCounter( name, callbacks, unit=unit, description=description, ) _METER_PROVIDER_SET_ONCE = Once() _METER_PROVIDER: Optional[MeterProvider] = None _PROXY_METER_PROVIDER = _ProxyMeterProvider() def get_meter( name: str, version: str = "", meter_provider: Optional[MeterProvider] = None, schema_url: Optional[str] = None, 
    attributes: Optional[Attributes] = None,
) -> "Meter":
    """Returns a `Meter` for use by the given instrumentation library.

    This function is a convenience wrapper for
    `opentelemetry.metrics.MeterProvider.get_meter`.

    If meter_provider is omitted, the currently configured one is used.
    """
    if meter_provider is None:
        meter_provider = get_meter_provider()
    return meter_provider.get_meter(name, version, schema_url, attributes)


def _set_meter_provider(meter_provider: MeterProvider, log: bool) -> None:
    def set_mp() -> None:
        global _METER_PROVIDER  # pylint: disable=global-statement
        _METER_PROVIDER = meter_provider

        # gives all proxies real instruments off the newly set meter provider
        _PROXY_METER_PROVIDER.on_set_meter_provider(meter_provider)

    did_set = _METER_PROVIDER_SET_ONCE.do_once(set_mp)

    if log and not did_set:
        _logger.warning("Overriding of current MeterProvider is not allowed")


def set_meter_provider(meter_provider: MeterProvider) -> None:
    """Sets the current global :class:`~.MeterProvider` object.

    This can only be done once; a warning will be logged if any further
    attempt is made.
    """
    _set_meter_provider(meter_provider, log=True)


def get_meter_provider() -> MeterProvider:
    """Gets the current global :class:`~.MeterProvider` object."""

    if _METER_PROVIDER is None:
        if OTEL_PYTHON_METER_PROVIDER not in environ:
            return _PROXY_METER_PROVIDER
        meter_provider: MeterProvider = _load_provider(  # type: ignore
            OTEL_PYTHON_METER_PROVIDER, "meter_provider"
        )
        _set_meter_provider(meter_provider, log=False)

    # _METER_PROVIDER will have been set by one thread
    return cast("MeterProvider", _METER_PROVIDER)
python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/metrics/_internal/instrument.py000066400000000000000000000347061511654350100333620ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=too-many-ancestors

from abc import ABC, abstractmethod
from dataclasses import dataclass
from logging import getLogger
from re import compile as re_compile
from typing import (
    Callable,
    Dict,
    Generator,
    Generic,
    Iterable,
    Optional,
    Sequence,
    TypeVar,
    Union,
)

# pylint: disable=unused-import; needed for typing and sphinx
from opentelemetry import metrics
from opentelemetry.context import Context
from opentelemetry.metrics._internal.observation import Observation
from opentelemetry.util.types import (
    Attributes,
)

_logger = getLogger(__name__)

_name_regex = re_compile(r"[a-zA-Z][-_./a-zA-Z0-9]{0,254}")
_unit_regex = re_compile(r"[\x00-\x7F]{0,63}")


@dataclass(frozen=True)
class _MetricsHistogramAdvisory:
    explicit_bucket_boundaries: Optional[Sequence[float]] = None


@dataclass(frozen=True)
class CallbackOptions:
    """Options for the callback

    Args:
        timeout_millis: Timeout for the callback's execution. If the callback
            does asynchronous work (e.g. HTTP requests), it should respect
            this timeout.
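
    Example (illustrative; assumes the third-party ``requests`` package and
    a ``url`` variable, neither of which this module provides)::

        def scrape(options: CallbackOptions) -> Iterable[Observation]:
            # honor the timeout when doing slow work
            response = requests.get(
                url, timeout=options.timeout_millis / 1000
            )
            yield Observation(len(response.content))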
""" timeout_millis: float = 10_000 InstrumentT = TypeVar("InstrumentT", bound="Instrument") # pylint: disable=invalid-name CallbackT = Union[ Callable[[CallbackOptions], Iterable[Observation]], Generator[Iterable[Observation], CallbackOptions, None], ] class Instrument(ABC): """Abstract class that serves as base for all instruments.""" @abstractmethod def __init__( self, name: str, unit: str = "", description: str = "", ) -> None: pass @staticmethod def _check_name_unit_description( name: str, unit: str, description: str ) -> Dict[str, Optional[str]]: """ Checks the following instrument name, unit and description for compliance with the spec. Returns a dict with keys "name", "unit" and "description", the corresponding values will be the checked strings or `None` if the value is invalid. If valid, the checked strings should be used instead of the original values. """ result: Dict[str, Optional[str]] = {} if _name_regex.fullmatch(name) is not None: result["name"] = name else: result["name"] = None if unit is None: unit = "" if _unit_regex.fullmatch(unit) is not None: result["unit"] = unit else: result["unit"] = None if description is None: result["description"] = "" else: result["description"] = description return result class _ProxyInstrument(ABC, Generic[InstrumentT]): def __init__( self, name: str, unit: str = "", description: str = "", ) -> None: self._name = name self._unit = unit self._description = description self._real_instrument: Optional[InstrumentT] = None def on_meter_set(self, meter: "metrics.Meter") -> None: """Called when a real meter is set on the creating _ProxyMeter""" # We don't need any locking on proxy instruments because it's OK if some # measurements get dropped while a real backing instrument is being # created. self._real_instrument = self._create_real_instrument(meter) @abstractmethod def _create_real_instrument(self, meter: "metrics.Meter") -> InstrumentT: """Create an instance of the real instrument. 
Implement this.""" class _ProxyAsynchronousInstrument(_ProxyInstrument[InstrumentT]): def __init__( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> None: super().__init__(name, unit, description) self._callbacks = callbacks class Synchronous(Instrument): """Base class for all synchronous instruments""" class Asynchronous(Instrument): """Base class for all asynchronous instruments""" @abstractmethod def __init__( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> None: super().__init__(name, unit=unit, description=description) class Counter(Synchronous): """A Counter is a synchronous `Instrument` which supports non-negative increments.""" @abstractmethod def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: pass class NoOpCounter(Counter): """No-op implementation of `Counter`.""" def __init__( self, name: str, unit: str = "", description: str = "", ) -> None: super().__init__(name, unit=unit, description=description) def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: return super().add(amount, attributes=attributes, context=context) class _ProxyCounter(_ProxyInstrument[Counter], Counter): def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: if self._real_instrument: self._real_instrument.add(amount, attributes, context) def _create_real_instrument(self, meter: "metrics.Meter") -> Counter: return meter.create_counter( self._name, self._unit, self._description, ) class UpDownCounter(Synchronous): """An UpDownCounter is a synchronous `Instrument` which supports increments and decrements.""" @abstractmethod def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: pass class NoOpUpDownCounter(UpDownCounter): """No-op implementation of `UpDownCounter`.""" def __init__( self, name: str, unit: str = "", description: str = "", ) -> None: super().__init__(name, unit=unit, description=description) def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: return super().add(amount, attributes=attributes, context=context) class _ProxyUpDownCounter(_ProxyInstrument[UpDownCounter], UpDownCounter): def add( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: if self._real_instrument: self._real_instrument.add(amount, attributes, context) def _create_real_instrument(self, meter: "metrics.Meter") -> UpDownCounter: return meter.create_up_down_counter( self._name, self._unit, self._description, ) class ObservableCounter(Asynchronous): """An ObservableCounter is an asynchronous `Instrument` which reports monotonically increasing value(s) when the instrument is being observed. 
""" class NoOpObservableCounter(ObservableCounter): """No-op implementation of `ObservableCounter`.""" def __init__( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> None: super().__init__( name, callbacks, unit=unit, description=description, ) class _ProxyObservableCounter( _ProxyAsynchronousInstrument[ObservableCounter], ObservableCounter ): def _create_real_instrument( self, meter: "metrics.Meter" ) -> ObservableCounter: return meter.create_observable_counter( self._name, self._callbacks, self._unit, self._description, ) class ObservableUpDownCounter(Asynchronous): """An ObservableUpDownCounter is an asynchronous `Instrument` which reports additive value(s) (e.g. the process heap size - it makes sense to report the heap size from multiple processes and sum them up, so we get the total heap usage) when the instrument is being observed. """ class NoOpObservableUpDownCounter(ObservableUpDownCounter): """No-op implementation of `ObservableUpDownCounter`.""" def __init__( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> None: super().__init__( name, callbacks, unit=unit, description=description, ) class _ProxyObservableUpDownCounter( _ProxyAsynchronousInstrument[ObservableUpDownCounter], ObservableUpDownCounter, ): def _create_real_instrument( self, meter: "metrics.Meter" ) -> ObservableUpDownCounter: return meter.create_observable_up_down_counter( self._name, self._callbacks, self._unit, self._description, ) class Histogram(Synchronous): """Histogram is a synchronous `Instrument` which can be used to report arbitrary values that are likely to be statistically meaningful. It is intended for statistics such as histograms, summaries, and percentile. 
""" @abstractmethod def __init__( self, name: str, unit: str = "", description: str = "", explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, ) -> None: pass @abstractmethod def record( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: pass class NoOpHistogram(Histogram): """No-op implementation of `Histogram`.""" def __init__( self, name: str, unit: str = "", description: str = "", explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, ) -> None: super().__init__( name, unit=unit, description=description, explicit_bucket_boundaries_advisory=explicit_bucket_boundaries_advisory, ) def record( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: return super().record(amount, attributes=attributes, context=context) class _ProxyHistogram(_ProxyInstrument[Histogram], Histogram): def __init__( self, name: str, unit: str = "", description: str = "", explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, ) -> None: super().__init__(name, unit=unit, description=description) self._explicit_bucket_boundaries_advisory = ( explicit_bucket_boundaries_advisory ) def record( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: if self._real_instrument: self._real_instrument.record(amount, attributes, context) def _create_real_instrument(self, meter: "metrics.Meter") -> Histogram: return meter.create_histogram( self._name, self._unit, self._description, explicit_bucket_boundaries_advisory=self._explicit_bucket_boundaries_advisory, ) class ObservableGauge(Asynchronous): """Asynchronous Gauge is an asynchronous `Instrument` which reports non-additive value(s) (e.g. the room temperature - it makes no sense to report the temperature value from multiple rooms and sum them up) when the instrument is being observed. 
""" class NoOpObservableGauge(ObservableGauge): """No-op implementation of `ObservableGauge`.""" def __init__( self, name: str, callbacks: Optional[Sequence[CallbackT]] = None, unit: str = "", description: str = "", ) -> None: super().__init__( name, callbacks, unit=unit, description=description, ) class _ProxyObservableGauge( _ProxyAsynchronousInstrument[ObservableGauge], ObservableGauge, ): def _create_real_instrument( self, meter: "metrics.Meter" ) -> ObservableGauge: return meter.create_observable_gauge( self._name, self._callbacks, self._unit, self._description, ) class Gauge(Synchronous): """A Gauge is a synchronous `Instrument` which can be used to record non-additive values as they occur.""" @abstractmethod def set( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: pass class NoOpGauge(Gauge): """No-op implementation of ``Gauge``.""" def __init__( self, name: str, unit: str = "", description: str = "", ) -> None: super().__init__(name, unit=unit, description=description) def set( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: return super().set(amount, attributes=attributes, context=context) class _ProxyGauge( _ProxyInstrument[Gauge], Gauge, ): def set( self, amount: Union[int, float], attributes: Optional[Attributes] = None, context: Optional[Context] = None, ) -> None: if self._real_instrument: self._real_instrument.set(amount, attributes, context) def _create_real_instrument(self, meter: "metrics.Meter") -> Gauge: return meter.create_gauge( self._name, self._unit, self._description, ) python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/metrics/_internal/observation.py000066400000000000000000000036311511654350100335000ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union from opentelemetry.context import Context from opentelemetry.util.types import Attributes class Observation: """A measurement observed in an asynchronous instrument Return/yield instances of this class from asynchronous instrument callbacks. 
Args: value: The float or int measured value attributes: The measurement's attributes context: The measurement's context """ def __init__( self, value: Union[int, float], attributes: Attributes = None, context: Optional[Context] = None, ) -> None: self._value = value self._attributes = attributes self._context = context @property def value(self) -> Union[float, int]: return self._value @property def attributes(self) -> Attributes: return self._attributes @property def context(self) -> Optional[Context]: return self._context def __eq__(self, other: object) -> bool: return ( isinstance(other, Observation) and self.value == other.value and self.attributes == other.attributes and self.context == other.context ) def __repr__(self) -> str: return f"Observation(value={self.value}, attributes={self.attributes}, context={self.context})" python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/metrics/py.typed000066400000000000000000000000001511654350100303020ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/propagate/000077500000000000000000000000001511654350100271315ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/propagate/__init__.py000066400000000000000000000134521511654350100312470ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ API for propagation of context. The propagators for the ``opentelemetry.propagators.composite.CompositePropagator`` can be defined via configuration in the ``OTEL_PROPAGATORS`` environment variable. This variable should be set to a comma-separated string of names of values for the ``opentelemetry_propagator`` entry point. For example, setting ``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value) would instantiate ``opentelemetry.propagators.composite.CompositePropagator`` with 2 propagators, one of type ``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator`` and other of type ``opentelemetry.baggage.propagation.W3CBaggagePropagator``. Notice that these propagator classes are defined as ``opentelemetry_propagator`` entry points in the ``pyproject.toml`` file of ``opentelemetry``. Example:: import flask import requests from opentelemetry import propagate PROPAGATOR = propagate.get_global_textmap() def get_header_from_flask_request(request, key): return request.headers.get_all(key) def set_header_into_requests_request(request: requests.Request, key: str, value: str): request.headers[key] = value def example_route(): context = PROPAGATOR.extract( get_header_from_flask_request, flask.request ) request_to_downstream = requests.Request( "GET", "http://httpbin.org/get" ) PROPAGATOR.inject( set_header_into_requests_request, request_to_downstream, context=context ) session = requests.Session() session.send(request_to_downstream.prepare()) .. 
_Propagation API Specification:
   https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md
"""

from logging import getLogger
from os import environ
from typing import List, Optional

from opentelemetry.context.context import Context
from opentelemetry.environment_variables import OTEL_PROPAGATORS
from opentelemetry.propagators import composite, textmap
from opentelemetry.util._importlib_metadata import entry_points

logger = getLogger(__name__)


def extract(
    carrier: textmap.CarrierT,
    context: Optional[Context] = None,
    getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter,
) -> Context:
    """Uses the configured propagator to extract a Context from the carrier.

    Args:
        getter: an object which contains a get function that can retrieve zero
            or more values from the carrier and a keys function that can get
            all the keys from the carrier.
        carrier: an object which contains values that are used to construct a
            Context. This object must be paired with an appropriate getter
            which understands how to extract a value from it.
        context: an optional Context to use. Defaults to root
            context if not set.
    """
    return get_global_textmap().extract(carrier, context, getter=getter)


def inject(
    carrier: textmap.CarrierT,
    context: Optional[Context] = None,
    setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter,
) -> None:
    """Uses the configured propagator to inject a Context into the carrier.

    Args:
        carrier: the medium used by Propagators to read values from and write
            values to. Should be paired with setter, which should know how to
            set header values on the carrier.
        context: An optional Context to use. Defaults to current
            context if not set.
        setter: An optional `Setter` object that can set values
            on the carrier.
    """
    get_global_textmap().inject(carrier, context=context, setter=setter)


propagators: List[textmap.TextMapPropagator] = []

# Single use variable here to hack black and make lint pass
environ_propagators = environ.get(
    OTEL_PROPAGATORS,
    "tracecontext,baggage",
)

for propagator in environ_propagators.split(","):
    propagator = propagator.strip()

    if propagator.lower() == "none":
        logger.debug(
            "OTEL_PROPAGATORS environment variable contains none, removing all propagators"
        )
        propagators = []
        break
    try:
        propagators.append(
            next(  # type: ignore
                iter(  # type: ignore
                    entry_points(  # type: ignore[misc]
                        group="opentelemetry_propagator",
                        name=propagator,
                    )
                )
            ).load()()
        )
    except StopIteration:
        raise ValueError(
            f"Propagator {propagator} not found. It is either misspelled or not installed."
) except Exception: # pylint: disable=broad-exception-caught logger.exception("Failed to load propagator: %s", propagator) raise _HTTP_TEXT_FORMAT: textmap.TextMapPropagator = composite.CompositePropagator( propagators ) def get_global_textmap() -> textmap.TextMapPropagator: return _HTTP_TEXT_FORMAT def set_global_textmap( http_text_format: textmap.TextMapPropagator, ) -> None: global _HTTP_TEXT_FORMAT # pylint:disable=global-statement _HTTP_TEXT_FORMAT = http_text_format python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/propagate/py.typed000066400000000000000000000000001511654350100306160ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/propagators/000077500000000000000000000000001511654350100275105ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/propagators/composite.py000066400000000000000000000062751511654350100320760ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import typing from typing_extensions import deprecated from opentelemetry.context.context import Context from opentelemetry.propagators import textmap logger = logging.getLogger(__name__) class CompositePropagator(textmap.TextMapPropagator): """CompositePropagator provides a mechanism for combining multiple propagators into a single one. Args: propagators: the list of propagators to use """ def __init__( self, propagators: typing.Sequence[textmap.TextMapPropagator] ) -> None: self._propagators = propagators def extract( self, carrier: textmap.CarrierT, context: typing.Optional[Context] = None, getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter, ) -> Context: """Run each of the configured propagators with the given context and carrier. Propagators are run in the order they are configured, if multiple propagators write the same context key, the propagator later in the list will override previous propagators. See `opentelemetry.propagators.textmap.TextMapPropagator.extract` """ for propagator in self._propagators: context = propagator.extract(carrier, context, getter=getter) return context # type: ignore def inject( self, carrier: textmap.CarrierT, context: typing.Optional[Context] = None, setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter, ) -> None: """Run each of the configured propagators with the given context and carrier. Propagators are run in the order they are configured, if multiple propagators write the same carrier key, the propagator later in the list will override previous propagators. See `opentelemetry.propagators.textmap.TextMapPropagator.inject` """ for propagator in self._propagators: propagator.inject(carrier, context, setter=setter) @property def fields(self) -> typing.Set[str]: """Returns a set with the fields set in `inject`. 
See `opentelemetry.propagators.textmap.TextMapPropagator.fields`
        """
        composite_fields = set()

        for propagator in self._propagators:
            for field in propagator.fields:
                composite_fields.add(field)

        return composite_fields


@deprecated(
    "You should use CompositePropagator. Deprecated since version 1.2.0."
)
class CompositeHTTPPropagator(CompositePropagator):
    """CompositeHTTPPropagator provides a mechanism for combining multiple
    propagators into a single one.
    """
python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/propagators/py.typed000066400000000000000000000000001511654350100311750ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/propagators/textmap.py000066400000000000000000000147621511654350100315520ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import typing

from opentelemetry.context.context import Context

CarrierT = typing.TypeVar("CarrierT")
# pylint: disable=invalid-name
CarrierValT = typing.Union[typing.List[str], str]


class Getter(abc.ABC, typing.Generic[CarrierT]):
    """This class implements a Getter that enables extracting propagated
    fields from a carrier.
    """

    @abc.abstractmethod
    def get(
        self, carrier: CarrierT, key: str
    ) -> typing.Optional[typing.List[str]]:
        """Function that can retrieve zero or more values from the carrier.
        In the case that the value does not exist, returns None.

        Args:
            carrier: An object which contains values that are used to
                construct a Context.
            key: key of a field in carrier.
        Returns:
            first value of the propagation key or None if the key doesn't
            exist.
        """

    @abc.abstractmethod
    def keys(self, carrier: CarrierT) -> typing.List[str]:
        """Function that can retrieve all the keys in a carrier object.

        Args:
            carrier: An object which contains values that are used to
                construct a Context.
        Returns:
            list of keys from the carrier.
        """


class Setter(abc.ABC, typing.Generic[CarrierT]):
    """This class implements a Setter that enables injecting propagated
    fields into a carrier.
    """

    @abc.abstractmethod
    def set(self, carrier: CarrierT, key: str, value: str) -> None:
        """Function that can set a value into a carrier.

        Args:
            carrier: An object which contains values that are used to
                construct a Context.
            key: key of a field in carrier.
            value: value for a field in carrier.
        """


class DefaultGetter(Getter[typing.Mapping[str, CarrierValT]]):
    def get(
        self, carrier: typing.Mapping[str, CarrierValT], key: str
    ) -> typing.Optional[typing.List[str]]:
        """Getter implementation to retrieve a value from a dictionary.

        Args:
            carrier: dictionary in which to get value
            key: the key used to get the value
        Returns:
            A list with a single string with the value if it exists, else None.
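
        Example (illustrative)::

            DefaultGetter().get({"x-key": "value"}, "x-key")    # ["value"]
            DefaultGetter().get({"x-key": "value"}, "missing")  # None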
""" val = carrier.get(key, None) if val is None: return None if isinstance(val, typing.Iterable) and not isinstance(val, str): return list(val) return [val] def keys( self, carrier: typing.Mapping[str, CarrierValT] ) -> typing.List[str]: """Keys implementation that returns all keys from a dictionary.""" return list(carrier.keys()) default_getter: Getter[CarrierT] = DefaultGetter() # type: ignore class DefaultSetter(Setter[typing.MutableMapping[str, CarrierValT]]): def set( self, carrier: typing.MutableMapping[str, CarrierValT], key: str, value: CarrierValT, ) -> None: """Setter implementation to set a value into a dictionary. Args: carrier: dictionary in which to set value key: the key used to set the value value: the value to set """ carrier[key] = value default_setter: Setter[CarrierT] = DefaultSetter() # type: ignore class TextMapPropagator(abc.ABC): """This class provides an interface that enables extracting and injecting context into headers of HTTP requests. HTTP frameworks and clients can integrate with TextMapPropagator by providing the object containing the headers, and a getter and setter function for the extraction and injection of values, respectively. """ @abc.abstractmethod def extract( self, carrier: CarrierT, context: typing.Optional[Context] = None, getter: Getter[CarrierT] = default_getter, ) -> Context: """Create a Context from values in the carrier. The extract function should retrieve values from the carrier object using getter, and use values to populate a Context value and return it. Args: getter: a function that can retrieve zero or more values from the carrier. In the case that the value does not exist, return an empty list. carrier: and object which contains values that are used to construct a Context. This object must be paired with an appropriate getter which understands how to extract a value from it. context: an optional Context to use. Defaults to root context if not set. Returns: A Context with configuration found in the carrier. """ @abc.abstractmethod def inject( self, carrier: CarrierT, context: typing.Optional[Context] = None, setter: Setter[CarrierT] = default_setter, ) -> None: """Inject values from a Context into a carrier. inject enables the propagation of values into HTTP clients or other objects which perform an HTTP request. Implementations should use the `Setter` 's set method to set values on the carrier. Args: carrier: An object that a place to define HTTP headers. Should be paired with setter, which should know how to set header values on the carrier. context: an optional Context to use. Defaults to current context if not set. setter: An optional `Setter` object that can set values on the carrier. """ @property @abc.abstractmethod def fields(self) -> typing.Set[str]: """ Gets the fields set in the carrier by the `inject` method. If the carrier is reused, its fields that correspond with the ones present in this attribute should be deleted before calling `inject`. Returns: A set with the fields set in `inject`. 
""" python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/py.typed000066400000000000000000000000001511654350100266340ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/trace/000077500000000000000000000000001511654350100262455ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/trace/__init__.py000066400000000000000000000546151511654350100303710ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The OpenTelemetry tracing API describes the classes used to generate distributed traces. The :class:`.Tracer` class controls access to the execution context, and manages span creation. Each operation in a trace is represented by a :class:`.Span`, which records the start, end time, and metadata associated with the operation. This module provides abstract (i.e. unimplemented) classes required for tracing, and a concrete no-op :class:`.NonRecordingSpan` that allows applications to use the API package alone without a supporting implementation. To get a tracer, you need to provide the package name from which you are calling the tracer APIs to OpenTelemetry by calling `TracerProvider.get_tracer` with the calling module name and the version of your package. The tracer supports creating spans that are "attached" or "detached" from the context. New spans are "attached" to the context in that they are created as children of the currently active span, and the newly-created span can optionally become the new active span:: from opentelemetry import trace tracer = trace.get_tracer(__name__) # Create a new root span, set it as the current span in context with tracer.start_as_current_span("parent"): # Attach a new child and update the current span with tracer.start_as_current_span("child"): do_work(): # Close child span, set parent as current # Close parent span, set default span as current When creating a span that's "detached" from the context the active span doesn't change, and the caller is responsible for managing the span's lifetime:: # Explicit parent span assignment is done via the Context from opentelemetry.trace import set_span_in_context context = set_span_in_context(parent) child = tracer.start_span("child", context=context) try: do_work(span=child) finally: child.end() Applications should generally use a single global TracerProvider, and use either implicit or explicit context propagation consistently throughout. .. versionadded:: 0.1.0 .. versionchanged:: 0.3.0 `TracerProvider` was introduced and the global ``tracer`` getter was replaced by ``tracer_provider``. .. versionchanged:: 0.5.0 ``tracer_provider`` was replaced by `get_tracer_provider`, ``set_preferred_tracer_provider_implementation`` was replaced by `set_tracer_provider`. 
""" import os import typing from abc import ABC, abstractmethod from enum import Enum from logging import getLogger from typing import Iterator, Optional, Sequence, cast from typing_extensions import deprecated from opentelemetry import context as context_api from opentelemetry.attributes import BoundedAttributes from opentelemetry.context.context import Context from opentelemetry.environment_variables import OTEL_PYTHON_TRACER_PROVIDER from opentelemetry.trace.propagation import ( _SPAN_KEY, get_current_span, set_span_in_context, ) from opentelemetry.trace.span import ( DEFAULT_TRACE_OPTIONS, DEFAULT_TRACE_STATE, INVALID_SPAN, INVALID_SPAN_CONTEXT, INVALID_SPAN_ID, INVALID_TRACE_ID, NonRecordingSpan, Span, SpanContext, TraceFlags, TraceState, format_span_id, format_trace_id, ) from opentelemetry.trace.status import Status, StatusCode from opentelemetry.util import types from opentelemetry.util._decorator import _agnosticcontextmanager from opentelemetry.util._once import Once from opentelemetry.util._providers import _load_provider logger = getLogger(__name__) class _LinkBase(ABC): def __init__(self, context: "SpanContext") -> None: self._context = context @property def context(self) -> "SpanContext": return self._context @property @abstractmethod def attributes(self) -> types.Attributes: pass class Link(_LinkBase): """A link to a `Span`. The attributes of a Link are immutable. Args: context: `SpanContext` of the `Span` to link to. attributes: Link's attributes. """ def __init__( self, context: "SpanContext", attributes: types.Attributes = None, ) -> None: super().__init__(context) self._attributes = attributes @property def attributes(self) -> types.Attributes: return self._attributes @property def dropped_attributes(self) -> int: if isinstance(self._attributes, BoundedAttributes): return self._attributes.dropped return 0 _Links = Optional[Sequence[Link]] class SpanKind(Enum): """Specifies additional details on how this span relates to its parent span. Note that this enumeration is experimental and likely to change. See https://github.com/open-telemetry/opentelemetry-specification/pull/226. """ #: Default value. Indicates that the span is used internally in the # application. INTERNAL = 0 #: Indicates that the span describes an operation that handles a remote # request. SERVER = 1 #: Indicates that the span describes a request to some remote service. CLIENT = 2 #: Indicates that the span describes a producer sending a message to a #: broker. Unlike client and server, there is usually no direct critical #: path latency relationship between producer and consumer spans. PRODUCER = 3 #: Indicates that the span describes a consumer receiving a message from a #: broker. Unlike client and server, there is usually no direct critical #: path latency relationship between producer and consumer spans. CONSUMER = 4 class TracerProvider(ABC): @abstractmethod def get_tracer( self, instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, attributes: typing.Optional[types.Attributes] = None, ) -> "Tracer": """Returns a `Tracer` for use by the given instrumentation library. For any two calls it is undefined whether the same or different `Tracer` instances are returned, even for different library names. This function may return different `Tracer` types (e.g. a no-op tracer vs. a functional tracer). 
Args: instrumenting_module_name: The uniquely identifiable name for instrumentation scope, such as instrumentation library, package, module or class name. ``__name__`` may not be used as this can result in different tracer names if the tracers are in different files. It is better to use a fixed string that can be imported where needed and used consistently as the name of the tracer. This should *not* be the name of the module that is instrumented but the name of the module doing the instrumentation. E.g., instead of ``"requests"``, use ``"opentelemetry.instrumentation.requests"``. instrumenting_library_version: Optional. The version string of the instrumenting library. Usually this should be the same as ``importlib.metadata.version(instrumenting_library_name)``. schema_url: Optional. Specifies the Schema URL of the emitted telemetry. attributes: Optional. Specifies the attributes of the emitted telemetry. """ class NoOpTracerProvider(TracerProvider): """The default TracerProvider, used when no implementation is available. All operations are no-op. """ def get_tracer( self, instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, attributes: typing.Optional[types.Attributes] = None, ) -> "Tracer": # pylint:disable=no-self-use,unused-argument return NoOpTracer() @deprecated( "You should use NoOpTracerProvider. Deprecated since version 1.9.0." ) class _DefaultTracerProvider(NoOpTracerProvider): """The default TracerProvider, used when no implementation is available. All operations are no-op. """ class ProxyTracerProvider(TracerProvider): def get_tracer( self, instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, attributes: typing.Optional[types.Attributes] = None, ) -> "Tracer": if _TRACER_PROVIDER: return _TRACER_PROVIDER.get_tracer( instrumenting_module_name, instrumenting_library_version, schema_url, attributes, ) return ProxyTracer( instrumenting_module_name, instrumenting_library_version, schema_url, attributes, ) class Tracer(ABC): """Handles span creation and in-process context propagation. This class provides methods for manipulating the context, creating spans, and controlling spans' lifecycles. """ @abstractmethod def start_span( self, name: str, context: Optional[Context] = None, kind: SpanKind = SpanKind.INTERNAL, attributes: types.Attributes = None, links: _Links = None, start_time: Optional[int] = None, record_exception: bool = True, set_status_on_exception: bool = True, ) -> "Span": """Starts a span. Create a new span. Start the span without setting it as the current span in the context. To start the span and use the context in a single method, see :meth:`start_as_current_span`. By default the current span in the context will be used as parent, but an explicit context can also be specified, by passing in a `Context` containing a current `Span`. If there is no current span in the global `Context` or in the specified context, the created span will be a root span. The span can be used as a context manager. On exiting the context manager, the span's end() method will be called. Example:: # trace.get_current_span() will be used as the implicit parent. # If none is found, the created span will be a root instance. with tracer.start_span("one") as child: child.add_event("child's event") Args: name: The name of the span to be created. context: An optional Context containing the span's parent. Defaults to the global context. 
            kind: The span's kind (relationship to parent). Note that this
                is meaningful even if there is no parent.
            attributes: The span's attributes.
            links: Links span to other spans
            start_time: Sets the start time of a span
            record_exception: Whether to record any exceptions raised within the
                context as error event on the span.
            set_status_on_exception: Only relevant if the returned span is used
                in a with/context manager. Defines whether the span status will
                be automatically set to ERROR when an uncaught exception is
                raised in the span with block. The span status won't be set by
                this mechanism if it was previously set manually.

        Returns:
            The newly-created span.
        """

    @_agnosticcontextmanager
    @abstractmethod
    def start_as_current_span(
        self,
        name: str,
        context: Optional[Context] = None,
        kind: SpanKind = SpanKind.INTERNAL,
        attributes: types.Attributes = None,
        links: _Links = None,
        start_time: Optional[int] = None,
        record_exception: bool = True,
        set_status_on_exception: bool = True,
        end_on_exit: bool = True,
    ) -> Iterator["Span"]:
        """Context manager for creating a new span and setting it as the
        current span in this tracer's context.

        Exiting the context manager will call the span's end method, as well
        as return the current span to its previous value by returning to the
        previous context.

        Example::

            with tracer.start_as_current_span("one") as parent:
                parent.add_event("parent's event")
                with tracer.start_as_current_span("two") as child:
                    child.add_event("child's event")
                    trace.get_current_span()  # returns child
                trace.get_current_span()      # returns parent
            trace.get_current_span()          # returns previously active span

        This is a convenience method for creating spans attached to the
        tracer's context. Applications that need more control over the span
        lifetime should use :meth:`start_span` instead. For example::

            with tracer.start_as_current_span(name) as span:
                do_work()

        is equivalent to::

            span = tracer.start_span(name)
            with opentelemetry.trace.use_span(span, end_on_exit=True):
                do_work()

        This can also be used as a decorator::

            @tracer.start_as_current_span("name")
            def function():
                ...

            function()

        Args:
            name: The name of the span to be created.
            context: An optional Context containing the span's parent.
                Defaults to the global context.
            kind: The span's kind (relationship to parent). Note that this
                is meaningful even if there is no parent.
            attributes: The span's attributes.
            links: Links span to other spans
            start_time: Sets the start time of a span
            record_exception: Whether to record any exceptions raised within the
                context as error event on the span.
            set_status_on_exception: Only relevant if the returned span is used
                in a with/context manager. Defines whether the span status will
                be automatically set to ERROR when an uncaught exception is
                raised in the span with block. The span status won't be set by
                this mechanism if it was previously set manually.
            end_on_exit: Whether to end the span automatically when leaving the
                context manager.

        Yields:
            The newly-created span.
""" class ProxyTracer(Tracer): # pylint: disable=W0222,signature-differs def __init__( self, instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, attributes: typing.Optional[types.Attributes] = None, ): self._instrumenting_module_name = instrumenting_module_name self._instrumenting_library_version = instrumenting_library_version self._schema_url = schema_url self._attributes = attributes self._real_tracer: Optional[Tracer] = None self._noop_tracer = NoOpTracer() @property def _tracer(self) -> Tracer: if self._real_tracer: return self._real_tracer if _TRACER_PROVIDER: self._real_tracer = _TRACER_PROVIDER.get_tracer( self._instrumenting_module_name, self._instrumenting_library_version, self._schema_url, self._attributes, ) return self._real_tracer return self._noop_tracer def start_span(self, *args, **kwargs) -> Span: # type: ignore return self._tracer.start_span(*args, **kwargs) # type: ignore @_agnosticcontextmanager # type: ignore def start_as_current_span(self, *args, **kwargs) -> Iterator[Span]: with self._tracer.start_as_current_span(*args, **kwargs) as span: # type: ignore yield span class NoOpTracer(Tracer): """The default Tracer, used when no Tracer implementation is available. All operations are no-op. """ def start_span( self, name: str, context: Optional[Context] = None, kind: SpanKind = SpanKind.INTERNAL, attributes: types.Attributes = None, links: _Links = None, start_time: Optional[int] = None, record_exception: bool = True, set_status_on_exception: bool = True, ) -> "Span": return INVALID_SPAN @_agnosticcontextmanager def start_as_current_span( self, name: str, context: Optional[Context] = None, kind: SpanKind = SpanKind.INTERNAL, attributes: types.Attributes = None, links: _Links = None, start_time: Optional[int] = None, record_exception: bool = True, set_status_on_exception: bool = True, end_on_exit: bool = True, ) -> Iterator["Span"]: yield INVALID_SPAN @deprecated("You should use NoOpTracer. Deprecated since version 1.9.0.") class _DefaultTracer(NoOpTracer): """The default Tracer, used when no Tracer implementation is available. All operations are no-op. """ _TRACER_PROVIDER_SET_ONCE = Once() _TRACER_PROVIDER: Optional[TracerProvider] = None _PROXY_TRACER_PROVIDER = ProxyTracerProvider() def get_tracer( instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, tracer_provider: Optional[TracerProvider] = None, schema_url: typing.Optional[str] = None, attributes: typing.Optional[types.Attributes] = None, ) -> "Tracer": """Returns a `Tracer` for use by the given instrumentation library. This function is a convenience wrapper for opentelemetry.trace.TracerProvider.get_tracer. If tracer_provider is omitted the current configured one is used. """ if tracer_provider is None: tracer_provider = get_tracer_provider() return tracer_provider.get_tracer( instrumenting_module_name, instrumenting_library_version, schema_url, attributes, ) def _set_tracer_provider(tracer_provider: TracerProvider, log: bool) -> None: def set_tp() -> None: global _TRACER_PROVIDER # pylint: disable=global-statement _TRACER_PROVIDER = tracer_provider did_set = _TRACER_PROVIDER_SET_ONCE.do_once(set_tp) if log and not did_set: logger.warning("Overriding of current TracerProvider is not allowed") def set_tracer_provider(tracer_provider: TracerProvider) -> None: """Sets the current global :class:`~.TracerProvider` object. 
This can only be done once; a warning will be logged if any further attempt is made. """ _set_tracer_provider(tracer_provider, log=True) def get_tracer_provider() -> TracerProvider: """Gets the current global :class:`~.TracerProvider` object.""" if _TRACER_PROVIDER is None: # if a global tracer provider has not been set either via code or env # vars, return a proxy tracer provider if OTEL_PYTHON_TRACER_PROVIDER not in os.environ: return _PROXY_TRACER_PROVIDER tracer_provider: TracerProvider = _load_provider( OTEL_PYTHON_TRACER_PROVIDER, "tracer_provider" ) _set_tracer_provider(tracer_provider, log=False) # _TRACER_PROVIDER will have been set by one thread return cast("TracerProvider", _TRACER_PROVIDER) @_agnosticcontextmanager def use_span( span: Span, end_on_exit: bool = False, record_exception: bool = True, set_status_on_exception: bool = True, ) -> Iterator[Span]: """Takes a non-active span and activates it in the current context. Args: span: The span that should be activated in the current context. end_on_exit: Whether to end the span automatically when leaving the context manager scope. record_exception: Whether to record any exceptions raised within the context as an error event on the span. set_status_on_exception: Only relevant if the returned span is used in a with/context manager. Defines whether the span status will be automatically set to ERROR when an uncaught exception is raised in the span's with block. The span status won't be set by this mechanism if it was previously set manually. """ try: token = context_api.attach(context_api.set_value(_SPAN_KEY, span)) try: yield span finally: context_api.detach(token) # Record only exceptions that inherit Exception class but not BaseException, because # classes that directly inherit BaseException are not technically errors, e.g. GeneratorExit. # See https://github.com/open-telemetry/opentelemetry-python/issues/4484 except Exception as exc: # pylint: disable=broad-exception-caught if isinstance(span, Span) and span.is_recording(): # Record the exception as an event if record_exception: span.record_exception(exc) # Set status in case exception was raised if set_status_on_exception: span.set_status( Status( status_code=StatusCode.ERROR, description=f"{type(exc).__name__}: {exc}", ) ) # This causes parent spans to set their status to ERROR and to record # an exception as an event if a child span raises an exception even if # such child span was started with both record_exception and # set_status_on_exception attributes set to False. raise finally: if end_on_exit: span.end() __all__ = [ "DEFAULT_TRACE_OPTIONS", "DEFAULT_TRACE_STATE", "INVALID_SPAN", "INVALID_SPAN_CONTEXT", "INVALID_SPAN_ID", "INVALID_TRACE_ID", "NonRecordingSpan", "Link", "Span", "SpanContext", "SpanKind", "TraceFlags", "TraceState", "TracerProvider", "Tracer", "format_span_id", "format_trace_id", "get_current_span", "get_tracer", "get_tracer_provider", "set_tracer_provider", "set_span_in_context", "use_span", "Status", "StatusCode", ] python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/trace/propagation/000077500000000000000000000000001511654350100305705ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/trace/propagation/__init__.py000066400000000000000000000032241511654350100327020ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from opentelemetry.context import create_key, get_value, set_value from opentelemetry.context.context import Context from opentelemetry.trace.span import INVALID_SPAN, Span SPAN_KEY = "current-span" _SPAN_KEY = create_key("current-span") def set_span_in_context( span: Span, context: Optional[Context] = None ) -> Context: """Set the span in the given context. Args: span: The Span to set. context: A Context object. If one is not passed, the default current context is used instead. """ ctx = set_value(_SPAN_KEY, span, context=context) return ctx def get_current_span(context: Optional[Context] = None) -> Span: """Retrieve the current span. Args: context: A Context object. If one is not passed, the default current context is used instead. Returns: The Span set in the context if it exists. INVALID_SPAN otherwise. """ span = get_value(_SPAN_KEY, context=context) if span is None or not isinstance(span, Span): return INVALID_SPAN return span python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/trace/propagation/tracecontext.py000066400000000000000000000101221511654350100336410ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import re import typing from opentelemetry import trace from opentelemetry.context.context import Context from opentelemetry.propagators import textmap from opentelemetry.trace import format_span_id, format_trace_id from opentelemetry.trace.span import TraceState class TraceContextTextMapPropagator(textmap.TextMapPropagator): """Extracts and injects using w3c TraceContext's headers.""" _TRACEPARENT_HEADER_NAME = "traceparent" _TRACESTATE_HEADER_NAME = "tracestate" _TRACEPARENT_HEADER_FORMAT = ( "^[ \t]*([0-9a-f]{2})-([0-9a-f]{32})-([0-9a-f]{16})-([0-9a-f]{2})" + "(-.*)?[ \t]*$" ) _TRACEPARENT_HEADER_FORMAT_RE = re.compile(_TRACEPARENT_HEADER_FORMAT) def extract( self, carrier: textmap.CarrierT, context: typing.Optional[Context] = None, getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter, ) -> Context: """Extracts SpanContext from the carrier.
See `opentelemetry.propagators.textmap.TextMapPropagator.extract` """ if context is None: context = Context() header = getter.get(carrier, self._TRACEPARENT_HEADER_NAME) if not header: return context match = re.search(self._TRACEPARENT_HEADER_FORMAT_RE, header[0]) if not match: return context version: str = match.group(1) trace_id: str = match.group(2) span_id: str = match.group(3) trace_flags: str = match.group(4) if trace_id == "0" * 32 or span_id == "0" * 16: return context if version == "00": if match.group(5): # type: ignore return context if version == "ff": return context tracestate_headers = getter.get(carrier, self._TRACESTATE_HEADER_NAME) if tracestate_headers is None: tracestate = None else: tracestate = TraceState.from_header(tracestate_headers) span_context = trace.SpanContext( trace_id=int(trace_id, 16), span_id=int(span_id, 16), is_remote=True, trace_flags=trace.TraceFlags(int(trace_flags, 16)), trace_state=tracestate, ) return trace.set_span_in_context( trace.NonRecordingSpan(span_context), context ) def inject( self, carrier: textmap.CarrierT, context: typing.Optional[Context] = None, setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter, ) -> None: """Injects SpanContext into the carrier. See `opentelemetry.propagators.textmap.TextMapPropagator.inject` """ span = trace.get_current_span(context) span_context = span.get_span_context() if span_context == trace.INVALID_SPAN_CONTEXT: return traceparent_string = f"00-{format_trace_id(span_context.trace_id)}-{format_span_id(span_context.span_id)}-{span_context.trace_flags:02x}" setter.set(carrier, self._TRACEPARENT_HEADER_NAME, traceparent_string) if span_context.trace_state: tracestate_string = span_context.trace_state.to_header() setter.set( carrier, self._TRACESTATE_HEADER_NAME, tracestate_string ) @property def fields(self) -> typing.Set[str]: """Returns a set with the fields set in `inject`. See `opentelemetry.propagators.textmap.TextMapPropagator.fields` """ return {self._TRACEPARENT_HEADER_NAME, self._TRACESTATE_HEADER_NAME} python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/trace/py.typed000066400000000000000000000000001511654350100277320ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/trace/span.py000066400000000000000000000462341511654350100275710ustar00rootroot00000000000000import abc import logging import re import types as python_types import typing import warnings from opentelemetry.trace.status import Status, StatusCode from opentelemetry.util import types # The key MUST begin with a lowercase letter or a digit, # and can only contain lowercase letters (a-z), digits (0-9), # underscores (_), dashes (-), asterisks (*), and forward slashes (/). # For multi-tenant vendor scenarios, an at sign (@) can be used to # prefix the vendor name. Vendors SHOULD set the tenant ID # at the beginning of the key. # key = ( lcalpha ) 0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) # key = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) "@" lcalpha 0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) # lcalpha = %x61-7A ; a-z _KEY_FORMAT = ( r"[a-z][_0-9a-z\-\*\/]{0,255}|" r"[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}" ) _KEY_PATTERN = re.compile(_KEY_FORMAT) # The value is an opaque string containing up to 256 printable # ASCII [RFC0020] characters (i.e., the range 0x20 to 0x7E) # except comma (,) and (=). 
# value = 0*255(chr) nblk-chr # nblk-chr = %x21-2B / %x2D-3C / %x3E-7E # chr = %x20 / nblk-chr _VALUE_FORMAT = ( r"[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]" ) _VALUE_PATTERN = re.compile(_VALUE_FORMAT) _TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS = 32 _delimiter_pattern = re.compile(r"[ \t]*,[ \t]*") _member_pattern = re.compile(f"({_KEY_FORMAT})(=)({_VALUE_FORMAT})[ \t]*") _logger = logging.getLogger(__name__) def _is_valid_pair(key: str, value: str) -> bool: return ( isinstance(key, str) and _KEY_PATTERN.fullmatch(key) is not None and isinstance(value, str) and _VALUE_PATTERN.fullmatch(value) is not None ) class Span(abc.ABC): """A span represents a single operation within a trace.""" @abc.abstractmethod def end(self, end_time: typing.Optional[int] = None) -> None: """Sets the current time as the span's end time. The span's end time is the wall time at which the operation finished. Only the first call to `end` should modify the span, and implementations are free to ignore or raise on further calls. """ @abc.abstractmethod def get_span_context(self) -> "SpanContext": """Gets the span's SpanContext. Get an immutable, serializable identifier for this span that can be used to create new child spans. Returns: A :class:`opentelemetry.trace.SpanContext` with a copy of this span's immutable state. """ @abc.abstractmethod def set_attributes( self, attributes: typing.Mapping[str, types.AttributeValue] ) -> None: """Sets Attributes. Sets multiple Attributes with the keys and values from the mapping passed as argument. Note: The behavior of `None` value attributes is undefined, and their use is hence strongly discouraged. It is also preferred to set attributes at span creation, instead of calling this method later since samplers can only consider information already present during span creation. """ @abc.abstractmethod def set_attribute(self, key: str, value: types.AttributeValue) -> None: """Sets an Attribute. Sets a single Attribute with the key and value passed as arguments. Note: The behavior of `None` value attributes is undefined, and their use is hence strongly discouraged. It is also preferred to set attributes at span creation, instead of calling this method later since samplers can only consider information already present during span creation. """ @abc.abstractmethod def add_event( self, name: str, attributes: types.Attributes = None, timestamp: typing.Optional[int] = None, ) -> None: """Adds an `Event`. Adds a single `Event` with the name and, optionally, a timestamp and attributes passed as arguments. Implementations should generate a timestamp if the `timestamp` argument is omitted. """ def add_link( # pylint: disable=no-self-use self, context: "SpanContext", attributes: types.Attributes = None, ) -> None: """Adds a `Link`. Adds a single `Link` with the `SpanContext` of the span to link to and, optionally, attributes passed as arguments. Implementations may ignore calls with an invalid span context if both attributes and TraceState are empty. Note: It is preferred to add links at span creation, instead of calling this method later since samplers can only consider information already present during span creation. """ warnings.warn( "Span.add_link() not implemented and will be a no-op. " "Use opentelemetry-sdk >= 1.23 to add links after span creation" ) @abc.abstractmethod def update_name(self, name: str) -> None: """Updates the `Span` name. This will override the name provided via :func:`opentelemetry.trace.Tracer.start_span`.
Upon this update, any sampling behavior based on Span name will depend on the implementation. """ @abc.abstractmethod def is_recording(self) -> bool: """Returns whether this span will be recorded. Returns true if this Span is active and recording information like events with the add_event operation and attributes using set_attribute. """ @abc.abstractmethod def set_status( self, status: typing.Union[Status, StatusCode], description: typing.Optional[str] = None, ) -> None: """Sets the Status of the Span. If used, this will override the default Span status. """ @abc.abstractmethod def record_exception( self, exception: BaseException, attributes: types.Attributes = None, timestamp: typing.Optional[int] = None, escaped: bool = False, ) -> None: """Records an exception as a span event.""" def __enter__(self) -> "Span": """Invoked when `Span` is used as a context manager. Returns the `Span` itself. """ return self def __exit__( self, exc_type: typing.Optional[typing.Type[BaseException]], exc_val: typing.Optional[BaseException], exc_tb: typing.Optional[python_types.TracebackType], ) -> None: """Ends context manager and calls `end` on the `Span`.""" self.end() class TraceFlags(int): """A bitmask that represents options specific to the trace. The only supported option is the "sampled" flag (``0x01``). If set, this flag indicates that the trace may have been sampled upstream. See the `W3C Trace Context - Traceparent`_ spec for details. .. _W3C Trace Context - Traceparent: https://www.w3.org/TR/trace-context/#trace-flags """ DEFAULT = 0x00 SAMPLED = 0x01 @classmethod def get_default(cls) -> "TraceFlags": return cls(cls.DEFAULT) @property def sampled(self) -> bool: return bool(self & TraceFlags.SAMPLED) DEFAULT_TRACE_OPTIONS = TraceFlags.get_default() class TraceState(typing.Mapping[str, str]): """A list of key-value pairs representing vendor-specific trace info. Keys and values are strings of up to 256 printable US-ASCII characters. Implementations should conform to the `W3C Trace Context - Tracestate`_ spec, which describes additional restrictions on valid field values. .. _W3C Trace Context - Tracestate: https://www.w3.org/TR/trace-context/#tracestate-field """ def __init__( self, entries: typing.Optional[ typing.Sequence[typing.Tuple[str, str]] ] = None, ) -> None: self._dict = {} # type: dict[str, str] if entries is None: return if len(entries) > _TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS: _logger.warning( "There can't be more than %s key/value pairs.", _TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS, ) return for key, value in entries: if _is_valid_pair(key, value): if key in self._dict: _logger.warning("Duplicate key: %s found.", key) continue self._dict[key] = value else: _logger.warning( "Invalid key/value pair (%s, %s) found.", key, value ) def __contains__(self, item: object) -> bool: return item in self._dict def __getitem__(self, key: str) -> str: return self._dict[key] def __iter__(self) -> typing.Iterator[str]: return iter(self._dict) def __len__(self) -> int: return len(self._dict) def __repr__(self) -> str: pairs = [ f"{{key={key}, value={value}}}" for key, value in self._dict.items() ] return str(pairs) def add(self, key: str, value: str) -> "TraceState": """Adds a key-value pair to tracestate. The provided pair should adhere to w3c tracestate identifiers format. Args: key: A valid tracestate key to add value: A valid tracestate value to add Returns: A new TraceState with the modifications applied. 
If the provided key-value pair is invalid or results in tracestate that violates tracecontext specification, they are discarded and the same tracestate will be returned. """ if not _is_valid_pair(key, value): _logger.warning( "Invalid key/value pair (%s, %s) found.", key, value ) return self # There can be a maximum of 32 pairs if len(self) >= _TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS: _logger.warning("There can't be more than 32 key/value pairs.") return self # Duplicate entries are not allowed if key in self._dict: _logger.warning("The provided key %s already exists.", key) return self new_state = [(key, value)] + list(self._dict.items()) return TraceState(new_state) def update(self, key: str, value: str) -> "TraceState": """Updates a key-value pair in tracestate. The provided pair should adhere to w3c tracestate identifiers format. Args: key: A valid tracestate key to update value: A valid tracestate value to update for key Returns: A new TraceState with the modifications applied. If the provided key-value pair is invalid or results in tracestate that violates tracecontext specification, they are discarded and the same tracestate will be returned. """ if not _is_valid_pair(key, value): _logger.warning( "Invalid key/value pair (%s, %s) found.", key, value ) return self prev_state = self._dict.copy() prev_state.pop(key, None) new_state = [(key, value), *prev_state.items()] return TraceState(new_state) def delete(self, key: str) -> "TraceState": """Deletes a key-value pair from tracestate. Args: key: A valid tracestate key to remove key-value pair from tracestate Returns: A new TraceState with the modifications applied. If the provided key-value pair is invalid or results in tracestate that violates tracecontext specification, they are discarded and the same tracestate will be returned. """ if key not in self._dict: _logger.warning("The provided key %s doesn't exist.", key) return self prev_state = self._dict.copy() prev_state.pop(key) new_state = list(prev_state.items()) return TraceState(new_state) def to_header(self) -> str: """Creates a w3c tracestate header from a TraceState. Returns: A string that adheres to the w3c tracestate header format. """ return ",".join(key + "=" + value for key, value in self._dict.items()) @classmethod def from_header(cls, header_list: typing.List[str]) -> "TraceState": """Parses one or more w3c tracestate headers into a TraceState. Args: header_list: one or more w3c tracestate headers. Returns: A valid TraceState that contains values extracted from the tracestate header. If the format of one of the headers is illegal, all values will be discarded and an empty tracestate will be returned. If the number of keys is beyond the maximum, all values will be discarded and an empty tracestate will be returned. """ pairs = {} # type: dict[str, str] for header in header_list: members: typing.List[str] = re.split(_delimiter_pattern, header) for member in members: # empty members are valid, but no need to process further. if not member: continue match = _member_pattern.fullmatch(member) if not match: _logger.warning( "Member doesn't match the w3c identifiers format %s", member, ) return cls() groups: typing.Tuple[str, ...]
= match.groups() key, _eq, value = groups # duplicate keys are not legal in header if key in pairs: return cls() pairs[key] = value return cls(list(pairs.items())) @classmethod def get_default(cls) -> "TraceState": return cls() def keys(self) -> typing.KeysView[str]: return self._dict.keys() def items(self) -> typing.ItemsView[str, str]: return self._dict.items() def values(self) -> typing.ValuesView[str]: return self._dict.values() DEFAULT_TRACE_STATE = TraceState.get_default() _TRACE_ID_MAX_VALUE = 2**128 - 1 _SPAN_ID_MAX_VALUE = 2**64 - 1 class SpanContext( typing.Tuple[int, int, bool, "TraceFlags", "TraceState", bool] ): """The state of a Span to propagate between processes. This class includes the immutable attributes of a :class:`.Span` that must be propagated to a span's children and across process boundaries. Args: trace_id: The ID of the trace that this span belongs to. span_id: This span's ID. is_remote: True if propagated from a remote parent. trace_flags: Trace options to propagate. trace_state: Tracing-system-specific info to propagate. """ def __new__( cls, trace_id: int, span_id: int, is_remote: bool, trace_flags: typing.Optional["TraceFlags"] = DEFAULT_TRACE_OPTIONS, trace_state: typing.Optional["TraceState"] = DEFAULT_TRACE_STATE, ) -> "SpanContext": if trace_flags is None: trace_flags = DEFAULT_TRACE_OPTIONS if trace_state is None: trace_state = DEFAULT_TRACE_STATE is_valid = ( INVALID_TRACE_ID < trace_id <= _TRACE_ID_MAX_VALUE and INVALID_SPAN_ID < span_id <= _SPAN_ID_MAX_VALUE ) return tuple.__new__( cls, (trace_id, span_id, is_remote, trace_flags, trace_state, is_valid), ) def __getnewargs__( self, ) -> typing.Tuple[int, int, bool, "TraceFlags", "TraceState"]: return ( self.trace_id, self.span_id, self.is_remote, self.trace_flags, self.trace_state, ) @property def trace_id(self) -> int: return self[0] # pylint: disable=unsubscriptable-object @property def span_id(self) -> int: return self[1] # pylint: disable=unsubscriptable-object @property def is_remote(self) -> bool: return self[2] # pylint: disable=unsubscriptable-object @property def trace_flags(self) -> "TraceFlags": return self[3] # pylint: disable=unsubscriptable-object @property def trace_state(self) -> "TraceState": return self[4] # pylint: disable=unsubscriptable-object @property def is_valid(self) -> bool: return self[5] # pylint: disable=unsubscriptable-object def __setattr__(self, *args: str) -> None: _logger.debug( "Immutable type, ignoring call to set attribute", stack_info=True ) def __delattr__(self, *args: str) -> None: _logger.debug( "Immutable type, ignoring call to set attribute", stack_info=True ) def __repr__(self) -> str: return f"{type(self).__name__}(trace_id=0x{format_trace_id(self.trace_id)}, span_id=0x{format_span_id(self.span_id)}, trace_flags=0x{self.trace_flags:02x}, trace_state={self.trace_state!r}, is_remote={self.is_remote})" class NonRecordingSpan(Span): """The Span that is used when no Span implementation is available. All operations are no-op except context propagation. 
""" def __init__(self, context: "SpanContext") -> None: self._context = context def get_span_context(self) -> "SpanContext": return self._context def is_recording(self) -> bool: return False def end(self, end_time: typing.Optional[int] = None) -> None: pass def set_attributes( self, attributes: typing.Mapping[str, types.AttributeValue] ) -> None: pass def set_attribute(self, key: str, value: types.AttributeValue) -> None: pass def add_event( self, name: str, attributes: types.Attributes = None, timestamp: typing.Optional[int] = None, ) -> None: pass def add_link( self, context: "SpanContext", attributes: types.Attributes = None, ) -> None: pass def update_name(self, name: str) -> None: pass def set_status( self, status: typing.Union[Status, StatusCode], description: typing.Optional[str] = None, ) -> None: pass def record_exception( self, exception: BaseException, attributes: types.Attributes = None, timestamp: typing.Optional[int] = None, escaped: bool = False, ) -> None: pass def __repr__(self) -> str: return f"NonRecordingSpan({self._context!r})" INVALID_SPAN_ID = 0x0000000000000000 INVALID_TRACE_ID = 0x00000000000000000000000000000000 INVALID_SPAN_CONTEXT = SpanContext( trace_id=INVALID_TRACE_ID, span_id=INVALID_SPAN_ID, is_remote=False, trace_flags=DEFAULT_TRACE_OPTIONS, trace_state=DEFAULT_TRACE_STATE, ) INVALID_SPAN = NonRecordingSpan(INVALID_SPAN_CONTEXT) def format_trace_id(trace_id: int) -> str: """Convenience trace ID formatting method Args: trace_id: Trace ID int Returns: The trace ID (16 bytes) cast to a 32-character hexadecimal string """ return format(trace_id, "032x") def format_span_id(span_id: int) -> str: """Convenience span ID formatting method Args: span_id: Span ID int Returns: The span ID (8 bytes) cast to a 16-character hexadecimal string """ return format(span_id, "016x") python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/trace/status.py000066400000000000000000000047531511654350100301530ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum import logging import typing logger = logging.getLogger(__name__) class StatusCode(enum.Enum): """Represents the canonical set of status codes of a finished Span.""" UNSET = 0 """The default status.""" OK = 1 """The operation has been validated by an Application developer or Operator to have completed successfully.""" ERROR = 2 """The operation contains an error.""" class Status: """Represents the status of a finished Span. Args: status_code: The canonical status code that describes the result status of the operation. description: An optional description of the status. 
""" def __init__( self, status_code: StatusCode = StatusCode.UNSET, description: typing.Optional[str] = None, ): self._status_code = status_code self._description = None if description: if not isinstance(description, str): logger.warning("Invalid status description type, expected str") return if status_code is not StatusCode.ERROR: logger.warning( "description should only be set when status_code is set to StatusCode.ERROR" ) return self._description = description @property def status_code(self) -> StatusCode: """Represents the canonical status code of a finished Span.""" return self._status_code @property def description(self) -> typing.Optional[str]: """Status description""" return self._description @property def is_ok(self) -> bool: """Returns false if this represents an error, true otherwise.""" return self.is_unset or self._status_code is StatusCode.OK @property def is_unset(self) -> bool: """Returns true if unset, false otherwise.""" return self._status_code is StatusCode.UNSET python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/util/000077500000000000000000000000001511654350100261245ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/util/_decorator.py000066400000000000000000000066201511654350100306230ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import functools import inspect from typing import TYPE_CHECKING, Callable, Generic, Iterator, TypeVar V = TypeVar("V") R = TypeVar("R") # Return type Pargs = TypeVar("Pargs") # Generic type for arguments Pkwargs = TypeVar("Pkwargs") # Generic type for arguments # We don't actually depend on typing_extensions but we can use it in CI with this conditional # import. ParamSpec can be imported directly from typing after python 3.9 is dropped # https://peps.python.org/pep-0612/. if TYPE_CHECKING: from typing_extensions import ParamSpec P = ParamSpec("P") # Generic type for all arguments class _AgnosticContextManager( contextlib._GeneratorContextManager[R], Generic[R], ): # pylint: disable=protected-access """Context manager that can decorate both async and sync functions. This is an overridden version of the contextlib._GeneratorContextManager class that will decorate async functions with an async context manager to end the span AFTER the entire async function coroutine finishes. Else it will report near zero spans durations for async functions. We are overriding the contextlib._GeneratorContextManager class as reimplementing it is a lot of code to maintain and this class (even if it's marked as protected) doesn't seems like to be evolving a lot. For more information, see: https://github.com/open-telemetry/opentelemetry-python/pull/3633 """ def __enter__(self) -> R: """Reimplementing __enter__ to avoid the type error. The original __enter__ method returns Any type, but we want to return R. 
""" del self.args, self.kwds, self.func # type: ignore try: return next(self.gen) # type: ignore except StopIteration: raise RuntimeError("generator didn't yield") from None def __call__(self, func: V) -> V: # pyright: ignore [reportIncompatibleMethodOverride] if inspect.iscoroutinefunction(func): @functools.wraps(func) # type: ignore async def async_wrapper(*args: Pargs, **kwargs: Pkwargs) -> R: # pyright: ignore [reportInvalidTypeVarUse] with self._recreate_cm(): # type: ignore return await func(*args, **kwargs) # type: ignore return async_wrapper # type: ignore return super().__call__(func) # type: ignore def _agnosticcontextmanager( func: "Callable[P, Iterator[R]]", ) -> "Callable[P, _AgnosticContextManager[R]]": @functools.wraps(func) def helper(*args: Pargs, **kwargs: Pkwargs) -> _AgnosticContextManager[R]: # pyright: ignore [reportInvalidTypeVarUse] return _AgnosticContextManager(func, args, kwargs) # pyright: ignore [reportArgumentType] # Ignoring the type to keep the original signature of the function return helper # type: ignore[return-value] python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py000066400000000000000000000030131511654350100324730ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import cache # FIXME: Use importlib.metadata (not importlib_metadata) # when support for 3.11 is dropped if the rest of # the supported versions at that time have the same API. from importlib_metadata import ( # type: ignore Distribution, EntryPoint, EntryPoints, PackageNotFoundError, distributions, requires, version, ) from importlib_metadata import ( entry_points as original_entry_points, ) @cache def _original_entry_points_cached(): return original_entry_points() def entry_points(**params): """Replacement for importlib_metadata.entry_points that caches getting all the entry points. That part can be very slow, and OTel uses this function many times.""" return _original_entry_points_cached().select(**params) __all__ = [ "entry_points", "version", "EntryPoint", "EntryPoints", "requires", "Distribution", "distributions", "PackageNotFoundError", ] python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/util/_once.py000066400000000000000000000026401511654350100275630ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from threading import Lock from typing import Callable class Once: """Execute a function exactly once and block all callers until the function returns Same as golang's `sync.Once `_ """ def __init__(self) -> None: self._lock = Lock() self._done = False def do_once(self, func: Callable[[], None]) -> bool: """Execute ``func`` if it hasn't been executed or return. Will block until ``func`` has been called by one thread. Returns: Whether or not ``func`` was executed in this call """ # fast path, try to avoid locking if self._done: return False with self._lock: if not self._done: func() self._done = True return True return False python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/util/_providers.py000066400000000000000000000033011511654350100306470ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from logging import getLogger from os import environ from typing import TYPE_CHECKING, TypeVar, cast from opentelemetry.util._importlib_metadata import entry_points if TYPE_CHECKING: from opentelemetry.metrics import MeterProvider from opentelemetry.trace import TracerProvider Provider = TypeVar("Provider", "TracerProvider", "MeterProvider") logger = getLogger(__name__) def _load_provider( provider_environment_variable: str, provider: str ) -> Provider: # type: ignore[type-var] try: provider_name = cast( str, environ.get(provider_environment_variable, f"default_{provider}"), ) return cast( Provider, next( # type: ignore iter( # type: ignore entry_points( # type: ignore group=f"opentelemetry_{provider}", name=provider_name, ) ) ).load()(), ) except Exception: # pylint: disable=broad-exception-caught logger.exception("Failed to load configured provider %s", provider) raise python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/util/py.typed000066400000000000000000000000001511654350100276110ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/util/re.py000066400000000000000000000111311511654350100271010ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
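# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library source): how the Once helper
# above behaves. The callback runs exactly one time regardless of how many
# callers race on do_once. Once lives in the internal module
# opentelemetry.util._once, so this import is for illustration only.
# ---------------------------------------------------------------------------
from opentelemetry.util._once import Once

_setup_once = Once()
_calls = []


def _setup() -> None:
    _calls.append("ran")


if __name__ == "__main__":
    # the first caller executes the function and gets True back
    assert _setup_once.do_once(_setup) is True
    # every later call is a no-op and gets False back
    assert _setup_once.do_once(_setup) is False
    assert _calls == ["ran"]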
from logging import getLogger from re import compile, split from typing import Dict, List, Mapping from urllib.parse import unquote from typing_extensions import deprecated _logger = getLogger(__name__) # The following regexes reference this spec: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#specifying-headers-via-environment-variables # Optional whitespace _OWS = r"[ \t]*" # A key contains printable US-ASCII characters except: SP and "(),/:;<=>?@[\]{} _KEY_FORMAT = ( r"[\x21\x23-\x27\x2a\x2b\x2d\x2e\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+" ) # A value contains a URL-encoded UTF-8 string. The encoded form can contain any # printable US-ASCII characters (0x20-0x7f) other than SP, DEL, and ",;/ _VALUE_FORMAT = r"[\x21\x23-\x2b\x2d-\x3a\x3c-\x5b\x5d-\x7e]*" # Like above with SP included _LIBERAL_VALUE_FORMAT = r"[\x20\x21\x23-\x2b\x2d-\x3a\x3c-\x5b\x5d-\x7e]*" # A key-value is key=value, with optional whitespace surrounding key and value _KEY_VALUE_FORMAT = rf"{_OWS}{_KEY_FORMAT}{_OWS}={_OWS}{_VALUE_FORMAT}{_OWS}" _HEADER_PATTERN = compile(_KEY_VALUE_FORMAT) _LIBERAL_HEADER_PATTERN = compile( rf"{_OWS}{_KEY_FORMAT}{_OWS}={_OWS}{_LIBERAL_VALUE_FORMAT}{_OWS}" ) _DELIMITER_PATTERN = compile(r"[ \t]*,[ \t]*") _BAGGAGE_PROPERTY_FORMAT = rf"{_KEY_VALUE_FORMAT}|{_OWS}{_KEY_FORMAT}{_OWS}" _INVALID_HEADER_ERROR_MESSAGE_STRICT_TEMPLATE = ( "Header format invalid! Header values in environment variables must be " "URL encoded per the OpenTelemetry Protocol Exporter specification: %s" ) _INVALID_HEADER_ERROR_MESSAGE_LIBERAL_TEMPLATE = ( "Header format invalid! Header values in environment variables must be " "URL encoded per the OpenTelemetry Protocol Exporter specification or " "a comma separated list of name=value occurrences: %s" ) # pylint: disable=invalid-name @deprecated( "You should use parse_env_headers. Deprecated since version 1.15.0." ) def parse_headers(s: str) -> Mapping[str, str]: return parse_env_headers(s) def parse_env_headers(s: str, liberal: bool = False) -> Mapping[str, str]: """ Parse ``s``, which is a ``str`` instance containing HTTP headers encoded for use in ENV variables per the W3C Baggage HTTP header format at https://www.w3.org/TR/baggage/#baggage-http-header-format, except that additional semi-colon delimited metadata is not supported. If ``liberal`` is True we try to parse ``s`` anyway to be more compatible with other languages SDKs that accept non URL-encoded headers by default. 
""" headers: Dict[str, str] = {} headers_list: List[str] = split(_DELIMITER_PATTERN, s) for header in headers_list: if not header: # empty string continue header_match = _HEADER_PATTERN.fullmatch(header.strip()) if not header_match and not liberal: _logger.warning( _INVALID_HEADER_ERROR_MESSAGE_STRICT_TEMPLATE, header ) continue if header_match: match_string: str = header_match.string # value may contain any number of `=` name, value = match_string.split("=", 1) name = unquote(name).strip().lower() value = unquote(value).strip() headers[name] = value else: # this is not url-encoded and does not match the spec but we decided to be # liberal in what we accept to match other languages SDKs behaviour liberal_header_match = _LIBERAL_HEADER_PATTERN.fullmatch( header.strip() ) if not liberal_header_match: _logger.warning( _INVALID_HEADER_ERROR_MESSAGE_LIBERAL_TEMPLATE, header ) continue liberal_match_string: str = liberal_header_match.string # value may contain any number of `=` name, value = liberal_match_string.split("=", 1) name = name.strip().lower() value = value.strip() headers[name] = value return headers python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/util/types.py000066400000000000000000000031501511654350100276410ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Mapping, Optional, Sequence, Tuple, Union # This is the implementation of the "Any" type as specified by the specifications of OpenTelemetry data model for logs. # For more details, refer to the OTel specification: # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#type-any AnyValue = Union[ str, bool, int, float, bytes, Sequence["AnyValue"], Mapping[str, "AnyValue"], None, ] AttributeValue = Union[ str, bool, int, float, Sequence[str], Sequence[bool], Sequence[int], Sequence[float], ] Attributes = Optional[Mapping[str, AttributeValue]] AttributesAsKey = Tuple[ Tuple[ str, Union[ str, bool, int, float, Tuple[Optional[str], ...], Tuple[Optional[bool], ...], Tuple[Optional[int], ...], Tuple[Optional[float], ...], ], ], ..., ] _ExtendedAttributes = Mapping[str, "AnyValue"] python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/version/000077500000000000000000000000001511654350100266345ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/version/__init__.py000066400000000000000000000011401511654350100307410ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. __version__ = "1.39.1" python-opentelemetry-1.39.1/opentelemetry-api/src/opentelemetry/version/py.typed000066400000000000000000000000001511654350100303210ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/test-requirements.txt000066400000000000000000000005711511654350100257300ustar00rootroot00000000000000asgiref==3.7.2 importlib-metadata==8.5.0 ; python_version < "3.9" importlib-metadata==8.7.0 ; python_version >= "3.9" iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.20.2 -e opentelemetry-sdk -e opentelemetry-semantic-conventions -e tests/opentelemetry-test-utils -e opentelemetry-api python-opentelemetry-1.39.1/opentelemetry-api/tests/000077500000000000000000000000001511654350100226265ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/__init__.py000066400000000000000000000011101511654350100247300ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python-opentelemetry-1.39.1/opentelemetry-api/tests/attributes/000077500000000000000000000000001511654350100250145ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/attributes/test_attributes.py000066400000000000000000000246331511654350100306230ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
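# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library source): expected behavior of
# parse_env_headers from opentelemetry.util.re, shown earlier. Keys are
# lower-cased, values are URL-decoded, and in strict mode values that are not
# URL-encoded are dropped with a warning; the header strings are made up.
# ---------------------------------------------------------------------------
from opentelemetry.util.re import parse_env_headers

if __name__ == "__main__":
    assert parse_env_headers("Api-Key=abc123,other=1") == {
        "api-key": "abc123",
        "other": "1",
    }
    assert parse_env_headers("name=value%20with%20space") == {
        "name": "value with space"
    }
    # a raw space in the value only parses in liberal mode
    assert parse_env_headers("name=a b", liberal=True) == {"name": "a b"}
    assert parse_env_headers("name=a b") == {}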
# type: ignore import unittest from typing import MutableSequence from opentelemetry.attributes import ( BoundedAttributes, _clean_attribute, _clean_extended_attribute, ) class TestAttributes(unittest.TestCase): # pylint: disable=invalid-name def assertValid(self, value, key="k"): expected = value if isinstance(value, MutableSequence): expected = tuple(value) self.assertEqual(_clean_attribute(key, value, None), expected) def assertInvalid(self, value, key="k"): self.assertIsNone(_clean_attribute(key, value, None)) def test_attribute_key_validation(self): # only non-empty strings are valid keys self.assertInvalid(1, "") self.assertInvalid(1, 1) self.assertInvalid(1, {}) self.assertInvalid(1, []) self.assertInvalid(1, b"1") self.assertValid(1, "k") self.assertValid(1, "1") def test_clean_attribute(self): self.assertInvalid([1, 2, 3.4, "ss", 4]) self.assertInvalid([{}, 1, 2, 3.4, 4]) self.assertInvalid(["sw", "lf", 3.4, "ss"]) self.assertInvalid([1, 2, 3.4, 5]) self.assertInvalid({}) self.assertInvalid([1, True]) self.assertValid(True) self.assertValid("hi") self.assertValid(3.4) self.assertValid(15) self.assertValid([1, 2, 3, 5]) self.assertValid([1.2, 2.3, 3.4, 4.5]) self.assertValid([True, False]) self.assertValid(["ss", "dw", "fw"]) self.assertValid([]) # None in sequences are valid self.assertValid(["A", None, None]) self.assertValid(["A", None, None, "B"]) self.assertValid([None, None]) self.assertInvalid(["A", None, 1]) self.assertInvalid([None, "A", None, 1]) # test keys self.assertValid("value", "key") self.assertInvalid("value", "") self.assertInvalid("value", None) def test_sequence_attr_decode(self): seq = [ None, b"Content-Disposition", b"Content-Type", b"\x81", b"Keep-Alive", ] expected = [ None, "Content-Disposition", "Content-Type", None, "Keep-Alive", ] self.assertEqual( _clean_attribute("headers", seq, None), tuple(expected) ) class TestExtendedAttributes(unittest.TestCase): # pylint: disable=invalid-name def assertValid(self, value, key="k"): expected = value if isinstance(value, MutableSequence): expected = tuple(value) self.assertEqual(_clean_extended_attribute(key, value, None), expected) def assertInvalid(self, value, key="k"): self.assertIsNone(_clean_extended_attribute(key, value, None)) def test_attribute_key_validation(self): # only non-empty strings are valid keys self.assertInvalid(1, "") self.assertInvalid(1, 1) self.assertInvalid(1, {}) self.assertInvalid(1, []) self.assertInvalid(1, b"1") self.assertValid(1, "k") self.assertValid(1, "1") def test_clean_extended_attribute(self): self.assertInvalid([1, 2, 3.4, "ss", 4]) self.assertInvalid([{}, 1, 2, 3.4, 4]) self.assertInvalid(["sw", "lf", 3.4, "ss"]) self.assertInvalid([1, 2, 3.4, 5]) self.assertInvalid([1, True]) self.assertValid(None) self.assertValid(True) self.assertValid("hi") self.assertValid(3.4) self.assertValid(15) self.assertValid([1, 2, 3, 5]) self.assertValid([1.2, 2.3, 3.4, 4.5]) self.assertValid([True, False]) self.assertValid(["ss", "dw", "fw"]) self.assertValid([]) # None in sequences are valid self.assertValid(["A", None, None]) self.assertValid(["A", None, None, "B"]) self.assertValid([None, None]) self.assertInvalid(["A", None, 1]) self.assertInvalid([None, "A", None, 1]) # mappings self.assertValid({}) self.assertValid({"k": "v"}) # mappings in sequences self.assertValid([{"k": "v"}]) # test keys self.assertValid("value", "key") self.assertInvalid("value", "") self.assertInvalid("value", None) def test_sequence_attr_decode(self): seq = [ None, b"Content-Disposition", b"Content-Type", b"\x81", 
b"Keep-Alive", ] self.assertEqual( _clean_extended_attribute("headers", seq, None), tuple(seq) ) def test_mapping(self): mapping = { "": "invalid", b"bytes": "invalid", "none": {"": "invalid"}, "valid_primitive": "str", "valid_sequence": ["str"], "invalid_sequence": ["str", 1], "valid_mapping": {"str": 1}, "invalid_mapping": {"": 1}, } expected = { "none": {}, "valid_primitive": "str", "valid_sequence": ("str",), "invalid_sequence": None, "valid_mapping": {"str": 1}, "invalid_mapping": {}, } self.assertEqual( _clean_extended_attribute("headers", mapping, None), expected ) class TestBoundedAttributes(unittest.TestCase): # pylint: disable=consider-using-dict-items base = { "name": "Firulais", "age": 7, "weight": 13, "vaccinated": True, } def test_negative_maxlen(self): with self.assertRaises(ValueError): BoundedAttributes(-1) def test_from_map(self): dic_len = len(self.base) base_copy = self.base.copy() bdict = BoundedAttributes(dic_len, base_copy) self.assertEqual(len(bdict), dic_len) # modify base_copy and test that bdict is not changed base_copy["name"] = "Bruno" base_copy["age"] = 3 for key in self.base: self.assertEqual(bdict[key], self.base[key]) # test that iter yields the correct number of elements self.assertEqual(len(tuple(bdict)), dic_len) # map too big half_len = dic_len // 2 bdict = BoundedAttributes(half_len, self.base) self.assertEqual(len(tuple(bdict)), half_len) self.assertEqual(bdict.dropped, dic_len - half_len) def test_bounded_dict(self): # create empty dict dic_len = len(self.base) bdict = BoundedAttributes(dic_len, immutable=False) self.assertEqual(len(bdict), 0) # fill dict for key in self.base: bdict[key] = self.base[key] self.assertEqual(len(bdict), dic_len) self.assertEqual(bdict.dropped, 0) for key in self.base: self.assertEqual(bdict[key], self.base[key]) # test __iter__ in BoundedAttributes for key in bdict: self.assertEqual(bdict[key], self.base[key]) # updating an existing element should not drop bdict["name"] = "Bruno" self.assertEqual(bdict.dropped, 0) # try to append more elements for key in self.base: bdict["new-" + key] = self.base[key] self.assertEqual(len(bdict), dic_len) self.assertEqual(bdict.dropped, dic_len) # Invalid values shouldn't be considered for `dropped` bdict["invalid-seq"] = [None, 1, "2"] self.assertEqual(bdict.dropped, dic_len) # test that elements in the dict are the new ones for key in self.base: self.assertEqual(bdict["new-" + key], self.base[key]) # delete an element del bdict["new-name"] self.assertEqual(len(bdict), dic_len - 1) with self.assertRaises(KeyError): _ = bdict["new-name"] def test_no_limit_code(self): bdict = BoundedAttributes(maxlen=None, immutable=False) for num in range(100): bdict[str(num)] = num for num in range(100): self.assertEqual(bdict[str(num)], num) def test_immutable(self): bdict = BoundedAttributes() with self.assertRaises(TypeError): bdict["should-not-work"] = "dict immutable" def test_locking(self): """Supporting test case for a commit titled: Fix class BoundedAttributes to have RLock rather than Lock. See #3858. The change was introduced because __iter__ of the class BoundedAttributes holds lock, and we observed some deadlock symptoms in the codebase. This test case is to verify that the fix works as expected. 
""" bdict = BoundedAttributes(immutable=False) with bdict._lock: # pylint: disable=protected-access for num in range(100): bdict[str(num)] = num for num in range(100): self.assertEqual(bdict[str(num)], num) # pylint: disable=no-self-use def test_extended_attributes(self): bdict = BoundedAttributes(extended_attributes=True, immutable=False) with unittest.mock.patch( "opentelemetry.attributes._clean_extended_attribute", return_value="mock_value", ) as clean_extended_attribute_mock: bdict["key"] = "value" clean_extended_attribute_mock.assert_called_once() def test_wsgi_request_conversion_to_string(self): """Test that WSGI request objects are converted to strings when _clean_extended_attribute is called.""" class DummyWSGIRequest: def __str__(self): return "" wsgi_request = DummyWSGIRequest() cleaned_value = _clean_extended_attribute( "request", wsgi_request, None ) # Verify we get a string back from the cleaner self.assertIsInstance(cleaned_value, str) self.assertEqual( "", cleaned_value ) python-opentelemetry-1.39.1/opentelemetry-api/tests/baggage/000077500000000000000000000000001511654350100242035ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/baggage/propagation/000077500000000000000000000000001511654350100265265ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/baggage/propagation/test_propagation.py000066400000000000000000000024311511654350100324620ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # type: ignore from unittest import TestCase from opentelemetry.baggage import get_baggage, set_baggage from opentelemetry.baggage.propagation import W3CBaggagePropagator class TestBaggageManager(TestCase): def test_propagate_baggage(self): carrier = {} propagator = W3CBaggagePropagator() ctx = set_baggage("Test1", "value1") ctx = set_baggage("test2", "value2", context=ctx) propagator.inject(carrier, ctx) ctx_propagated = propagator.extract(carrier) self.assertEqual( get_baggage("Test1", context=ctx_propagated), "value1" ) self.assertEqual( get_baggage("test2", context=ctx_propagated), "value2" ) python-opentelemetry-1.39.1/opentelemetry-api/tests/baggage/test_baggage.py000066400000000000000000000055401511654350100271750ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# type: ignore from unittest import TestCase from opentelemetry.baggage import ( _is_valid_value, clear, get_all, get_baggage, remove_baggage, set_baggage, ) from opentelemetry.context import attach, detach class TestBaggageManager(TestCase): def test_set_baggage(self): self.assertEqual({}, get_all()) ctx = set_baggage("test", "value") self.assertEqual(get_baggage("test", context=ctx), "value") ctx = set_baggage("test", "value2", context=ctx) self.assertEqual(get_baggage("test", context=ctx), "value2") def test_baggages_current_context(self): token = attach(set_baggage("test", "value")) self.assertEqual(get_baggage("test"), "value") detach(token) self.assertEqual(get_baggage("test"), None) def test_set_multiple_baggage_entries(self): ctx = set_baggage("test", "value") ctx = set_baggage("test2", "value2", context=ctx) self.assertEqual(get_baggage("test", context=ctx), "value") self.assertEqual(get_baggage("test2", context=ctx), "value2") self.assertEqual( get_all(context=ctx), {"test": "value", "test2": "value2"}, ) def test_modifying_baggage(self): ctx = set_baggage("test", "value") self.assertEqual(get_baggage("test", context=ctx), "value") baggage_entries = get_all(context=ctx) with self.assertRaises(TypeError): baggage_entries["test"] = "mess-this-up" self.assertEqual(get_baggage("test", context=ctx), "value") def test_remove_baggage_entry(self): self.assertEqual({}, get_all()) ctx = set_baggage("test", "value") ctx = set_baggage("test2", "value2", context=ctx) ctx = remove_baggage("test", context=ctx) self.assertEqual(get_baggage("test", context=ctx), None) self.assertEqual(get_baggage("test2", context=ctx), "value2") def test_clear_baggage(self): self.assertEqual({}, get_all()) ctx = set_baggage("test", "value") self.assertEqual(get_baggage("test", context=ctx), "value") ctx = clear(context=ctx) self.assertEqual(get_all(context=ctx), {}) def test__is_valid_value(self): self.assertTrue(_is_valid_value("GET%20%2Fapi%2F%2Freport")) python-opentelemetry-1.39.1/opentelemetry-api/tests/context/000077500000000000000000000000001511654350100243125ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/context/__init__.py000066400000000000000000000000001511654350100264110ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/context/base_context.py000066400000000000000000000055761511654350100273570ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
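# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library source): scoping baggage to
# the current context with attach/detach, mirroring
# test_baggages_current_context above; the entry key/value are made up.
# ---------------------------------------------------------------------------
from opentelemetry import baggage, context

if __name__ == "__main__":
    token = context.attach(baggage.set_baggage("user", "alice"))
    try:
        assert baggage.get_baggage("user") == "alice"
    finally:
        context.detach(token)
    # once detached, the entry is gone from the implicit current context
    assert baggage.get_baggage("user") is None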
import unittest from logging import ERROR from opentelemetry import context def do_work() -> None: context.attach(context.set_value("say", "bar")) class ContextTestCases: class BaseTest(unittest.TestCase): def setUp(self) -> None: self.previous_context = context.get_current() def tearDown(self) -> None: context.attach(self.previous_context) def test_context(self): self.assertIsNone(context.get_value("say")) empty = context.get_current() second = context.set_value("say", "foo") self.assertEqual(context.get_value("say", context=second), "foo") do_work() self.assertEqual(context.get_value("say"), "bar") third = context.get_current() self.assertIsNone(context.get_value("say", context=empty)) self.assertEqual(context.get_value("say", context=second), "foo") self.assertEqual(context.get_value("say", context=third), "bar") def test_set_value(self): first = context.set_value("a", "yyy") second = context.set_value("a", "zzz") third = context.set_value("a", "---", first) self.assertEqual("yyy", context.get_value("a", context=first)) self.assertEqual("zzz", context.get_value("a", context=second)) self.assertEqual("---", context.get_value("a", context=third)) self.assertEqual(None, context.get_value("a")) def test_attach(self): context.attach(context.set_value("a", "yyy")) token = context.attach(context.set_value("a", "zzz")) self.assertEqual("zzz", context.get_value("a")) context.detach(token) self.assertEqual("yyy", context.get_value("a")) with self.assertLogs(level=ERROR): context.detach(token) def test_detach_out_of_order(self): t1 = context.attach(context.set_value("c", 1)) self.assertEqual(context.get_current(), {"c": 1}) t2 = context.attach(context.set_value("c", 2)) self.assertEqual(context.get_current(), {"c": 2}) context.detach(t1) self.assertEqual(context.get_current(), {}) context.detach(t2) self.assertEqual(context.get_current(), {"c": 1}) python-opentelemetry-1.39.1/opentelemetry-api/tests/context/propagation/000077500000000000000000000000001511654350100266355ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/context/propagation/__init__.py000066400000000000000000000000001511654350100307340ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/context/test_context.py000066400000000000000000000073111511654350100274110ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
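# Illustrative sketch (not executed here) of the create_key behaviour the
# tests below rely on: two keys created from the same string are distinct,
# so independent libraries cannot collide on context entries.
#
#     from opentelemetry import context
#
#     key1 = context.create_key("say")
#     key2 = context.create_key("say")
#     assert key1 != key2  # each call yields a unique key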
import unittest from unittest.mock import patch from opentelemetry import context from opentelemetry.context.context import Context from opentelemetry.context.contextvars_context import ContextVarsRuntimeContext from opentelemetry.environment_variables import OTEL_PYTHON_CONTEXT def _do_work() -> str: key = context.create_key("say") context.attach(context.set_value(key, "bar")) return key class TestContext(unittest.TestCase): def setUp(self): context.attach(Context()) def test_context_key(self): key1 = context.create_key("say") key2 = context.create_key("say") self.assertNotEqual(key1, key2) first = context.set_value(key1, "foo") second = context.set_value(key2, "bar") self.assertEqual(context.get_value(key1, context=first), "foo") self.assertEqual(context.get_value(key2, context=second), "bar") def test_context(self): key1 = context.create_key("say") self.assertIsNone(context.get_value(key1)) empty = context.get_current() second = context.set_value(key1, "foo") self.assertEqual(context.get_value(key1, context=second), "foo") key2 = _do_work() self.assertEqual(context.get_value(key2), "bar") third = context.get_current() self.assertIsNone(context.get_value(key1, context=empty)) self.assertEqual(context.get_value(key1, context=second), "foo") self.assertEqual(context.get_value(key2, context=third), "bar") def test_set_value(self): first = context.set_value("a", "yyy") second = context.set_value("a", "zzz") third = context.set_value("a", "---", first) self.assertEqual("yyy", context.get_value("a", context=first)) self.assertEqual("zzz", context.get_value("a", context=second)) self.assertEqual("---", context.get_value("a", context=third)) self.assertEqual(None, context.get_value("a")) def test_context_is_immutable(self): with self.assertRaises(ValueError): # ensure a context context.get_current()["test"] = "cant-change-immutable" def test_set_current(self): context.attach(context.set_value("a", "yyy")) token = context.attach(context.set_value("a", "zzz")) self.assertEqual("zzz", context.get_value("a")) context.detach(token) self.assertEqual("yyy", context.get_value("a")) class TestInitContext(unittest.TestCase): def test_load_runtime_context_default(self): ctx = context._load_runtime_context() # pylint: disable=W0212 self.assertIsInstance(ctx, ContextVarsRuntimeContext) @patch.dict("os.environ", {OTEL_PYTHON_CONTEXT: "contextvars_context"}) def test_load_runtime_context(self): # type: ignore[misc] ctx = context._load_runtime_context() # pylint: disable=W0212 self.assertIsInstance(ctx, ContextVarsRuntimeContext) @patch.dict("os.environ", {OTEL_PYTHON_CONTEXT: "foo"}) def test_load_runtime_context_fallback(self): # type: ignore[misc] ctx = context._load_runtime_context() # pylint: disable=W0212 self.assertIsInstance(ctx, ContextVarsRuntimeContext) python-opentelemetry-1.39.1/opentelemetry-api/tests/context/test_contextvars_context.py000066400000000000000000000024151511654350100320510ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from unittest.mock import patch from opentelemetry import context from opentelemetry.context.contextvars_context import ContextVarsRuntimeContext # pylint: disable=import-error,no-name-in-module from tests.context.base_context import ContextTestCases class TestContextVarsContext(ContextTestCases.BaseTest): # pylint: disable=invalid-name def setUp(self) -> None: super().setUp() self.mock_runtime = patch.object( context, "_RUNTIME_CONTEXT", ContextVarsRuntimeContext(), ) self.mock_runtime.start() # pylint: disable=invalid-name def tearDown(self) -> None: super().tearDown() self.mock_runtime.stop() python-opentelemetry-1.39.1/opentelemetry-api/tests/distributedcontext/000077500000000000000000000000001511654350100265555ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/distributedcontext/__init__.py000066400000000000000000000011101511654350100306570ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python-opentelemetry-1.39.1/opentelemetry-api/tests/events/000077500000000000000000000000001511654350100241325ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/events/test_event.py000066400000000000000000000013601511654350100266640ustar00rootroot00000000000000import unittest from opentelemetry._events import Event class TestEvent(unittest.TestCase): def test_event(self): event = Event("example", 123, attributes={"key": "value"}) self.assertEqual(event.name, "example") self.assertEqual(event.timestamp, 123) self.assertEqual( event.attributes, {"key": "value", "event.name": "example"} ) def test_event_name_copied_in_attributes(self): event = Event("name", 123) self.assertEqual(event.attributes, {"event.name": "name"}) def test_event_name_has_precedence_over_attributes(self): event = Event("name", 123, attributes={"event.name": "attr value"}) self.assertEqual(event.attributes, {"event.name": "name"}) python-opentelemetry-1.39.1/opentelemetry-api/tests/events/test_event_logger_provider.py000066400000000000000000000040531511654350100321370ustar00rootroot00000000000000# type:ignore import unittest from unittest.mock import Mock, patch import opentelemetry._events as events from opentelemetry._events import ( get_event_logger_provider, set_event_logger_provider, ) from opentelemetry.test.globals_test import EventsGlobalsTest class TestGlobals(EventsGlobalsTest, unittest.TestCase): @patch("opentelemetry._events._logger") def test_set_event_logger_provider(self, logger_mock): elp_mock = Mock() # pylint: disable=protected-access self.assertIsNone(events._EVENT_LOGGER_PROVIDER) set_event_logger_provider(elp_mock) self.assertIs(events._EVENT_LOGGER_PROVIDER, elp_mock) self.assertIs(get_event_logger_provider(), elp_mock) logger_mock.warning.assert_not_called() # pylint: disable=no-self-use @patch("opentelemetry._events._logger") def test_set_event_logger_provider_will_warn_second_call( self, logger_mock ): elp_mock = Mock() set_event_logger_provider(elp_mock) set_event_logger_provider(elp_mock) 
logger_mock.warning.assert_called_once_with( "Overriding of current EventLoggerProvider is not allowed" ) def test_get_event_logger_provider(self): # pylint: disable=protected-access self.assertIsNone(events._EVENT_LOGGER_PROVIDER) self.assertIsInstance( get_event_logger_provider(), events.ProxyEventLoggerProvider ) events._EVENT_LOGGER_PROVIDER = None with patch.dict( "os.environ", { "OTEL_PYTHON_EVENT_LOGGER_PROVIDER": "test_event_logger_provider" }, ): with patch("opentelemetry._events._load_provider", Mock()): with patch( "opentelemetry._events.cast", Mock(**{"return_value": "test_event_logger_provider"}), ): self.assertEqual( get_event_logger_provider(), "test_event_logger_provider", ) python-opentelemetry-1.39.1/opentelemetry-api/tests/events/test_proxy_event.py000066400000000000000000000032441511654350100301300ustar00rootroot00000000000000# pylint: disable=W0212,W0222,W0221 import typing import unittest import opentelemetry._events as events from opentelemetry.test.globals_test import EventsGlobalsTest from opentelemetry.util.types import _ExtendedAttributes class TestProvider(events.NoOpEventLoggerProvider): def get_event_logger( self, name: str, version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, attributes: typing.Optional[_ExtendedAttributes] = None, ) -> events.EventLogger: return LoggerTest(name) class LoggerTest(events.NoOpEventLogger): def emit(self, event: events.Event) -> None: pass class TestProxy(EventsGlobalsTest, unittest.TestCase): def test_proxy_logger(self): provider = events.get_event_logger_provider() # proxy provider self.assertIsInstance(provider, events.ProxyEventLoggerProvider) # provider returns proxy logger event_logger = provider.get_event_logger("proxy-test") self.assertIsInstance(event_logger, events.ProxyEventLogger) # set a real provider events.set_event_logger_provider(TestProvider()) # get_logger_provider() now returns the real provider self.assertIsInstance(events.get_event_logger_provider(), TestProvider) # logger provider now returns real instance self.assertIsInstance( events.get_event_logger_provider().get_event_logger("fresh"), LoggerTest, ) # references to the old provider still work but return real logger now real_logger = provider.get_event_logger("proxy-test") self.assertIsInstance(real_logger, LoggerTest) python-opentelemetry-1.39.1/opentelemetry-api/tests/logs/000077500000000000000000000000001511654350100235725ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/logs/test_log_record.py000066400000000000000000000020111511654350100273140ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
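# Illustrative note (not executed here) on the default being tested below:
# when a LogRecord is built without an observed_timestamp, the API fills it
# in from time_ns(), which is why the test patches
# opentelemetry._logs._internal.time_ns and compares against a sentinel.
#
#     record = LogRecord()          # no observed_timestamp supplied
#     record.observed_timestamp     # -> whatever time_ns() returned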
import unittest
from unittest.mock import patch

from opentelemetry._logs import LogRecord

OBSERVED_TIMESTAMP = "OBSERVED_TIMESTAMP"


class TestLogRecord(unittest.TestCase):
    @patch("opentelemetry._logs._internal.time_ns")
    def test_log_record_observed_timestamp_default(self, time_ns_mock):  # type: ignore
        time_ns_mock.return_value = OBSERVED_TIMESTAMP
        self.assertEqual(LogRecord().observed_timestamp, OBSERVED_TIMESTAMP)
python-opentelemetry-1.39.1/opentelemetry-api/tests/logs/test_logger_provider.py000066400000000000000000000043311511654350100303750ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# type:ignore

import unittest
from unittest.mock import Mock, patch

import opentelemetry._logs._internal as logs_internal
from opentelemetry._logs import get_logger_provider, set_logger_provider
from opentelemetry.environment_variables import _OTEL_PYTHON_LOGGER_PROVIDER
from opentelemetry.test.globals_test import reset_logging_globals


class TestGlobals(unittest.TestCase):
    def setUp(self):
        super().setUp()
        reset_logging_globals()

    def tearDown(self):
        super().tearDown()
        reset_logging_globals()

    def test_set_logger_provider(self):
        lp_mock = Mock()
        # pylint: disable=protected-access
        self.assertIsNone(logs_internal._LOGGER_PROVIDER)
        set_logger_provider(lp_mock)
        self.assertIs(logs_internal._LOGGER_PROVIDER, lp_mock)
        self.assertIs(get_logger_provider(), lp_mock)

    def test_get_logger_provider(self):
        # pylint: disable=protected-access
        self.assertIsNone(logs_internal._LOGGER_PROVIDER)

        self.assertIsInstance(
            get_logger_provider(), logs_internal.ProxyLoggerProvider
        )

        logs_internal._LOGGER_PROVIDER = None

        with patch.dict(
            "os.environ",
            {_OTEL_PYTHON_LOGGER_PROVIDER: "test_logger_provider"},
        ):
            with patch("opentelemetry._logs._internal._load_provider", Mock()):
                with patch(
                    "opentelemetry._logs._internal.cast",
                    Mock(**{"return_value": "test_logger_provider"}),
                ):
                    self.assertEqual(
                        get_logger_provider(), "test_logger_provider"
                    )
python-opentelemetry-1.39.1/opentelemetry-api/tests/logs/test_proxy.py000066400000000000000000000047221511654350100263710ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
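# Illustrative sketch (not executed here) of the proxy lifecycle this module
# covers: loggers handed out before a real provider is installed are
# proxies, and they start delegating once set_logger_provider is called.
# `MySdkProvider` and `record` below are hypothetical stand-ins.
#
#     logger = _logs.get_logger_provider().get_logger("early")  # ProxyLogger
#     _logs.set_logger_provider(MySdkProvider())  # hypothetical SDK provider
#     logger.emit(record)  # now forwarded to the real logger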
# pylint: disable=W0212,W0222,W0221 import typing import unittest import opentelemetry._logs._internal as _logs_internal from opentelemetry import _logs from opentelemetry.test.globals_test import LoggingGlobalsTest from opentelemetry.util.types import _ExtendedAttributes class TestProvider(_logs.NoOpLoggerProvider): def get_logger( self, name: str, version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, attributes: typing.Optional[_ExtendedAttributes] = None, ) -> _logs.Logger: return LoggerTest(name) class LoggerTest(_logs.NoOpLogger): def emit( self, record: typing.Optional[_logs.LogRecord] = None, *, timestamp=None, observed_timestamp=None, context=None, severity_number=None, severity_text=None, body=None, attributes=None, event_name=None, ) -> None: pass class TestProxy(LoggingGlobalsTest, unittest.TestCase): def test_proxy_logger(self): provider = _logs.get_logger_provider() # proxy provider self.assertIsInstance(provider, _logs_internal.ProxyLoggerProvider) # provider returns proxy logger logger = provider.get_logger("proxy-test") self.assertIsInstance(logger, _logs_internal.ProxyLogger) # set a real provider _logs.set_logger_provider(TestProvider()) # get_logger_provider() now returns the real provider self.assertIsInstance(_logs.get_logger_provider(), TestProvider) # logger provider now returns real instance self.assertIsInstance( _logs.get_logger_provider().get_logger("fresh"), LoggerTest ) # references to the old provider still work but return real logger now real_logger = provider.get_logger("proxy-test") self.assertIsInstance(real_logger, LoggerTest) python-opentelemetry-1.39.1/opentelemetry-api/tests/metrics/000077500000000000000000000000001511654350100242745ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/metrics/test_instruments.py000066400000000000000000000571711511654350100303130ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore from inspect import Signature, isabstract, signature from unittest import TestCase from opentelemetry.metrics import ( Counter, Histogram, Instrument, Meter, NoOpCounter, NoOpHistogram, NoOpMeter, NoOpUpDownCounter, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, _Gauge, ) # FIXME Test that the instrument methods can be called concurrently safely. class ChildInstrument(Instrument): # pylint: disable=useless-parent-delegation def __init__(self, name, *args, unit="", description="", **kwargs): super().__init__( name, *args, unit=unit, description=description, **kwargs ) class TestCounter(TestCase): def test_create_counter(self): """ Test that the Counter can be created with create_counter. """ self.assertTrue( isinstance(NoOpMeter("name").create_counter("name"), Counter) ) def test_api_counter_abstract(self): """ Test that the API Counter is an abstract class. 
""" self.assertTrue(isabstract(Counter)) def test_create_counter_api(self): """ Test that the API for creating a counter accepts the name of the instrument. Test that the API for creating a counter accepts the unit of the instrument. Test that the API for creating a counter accepts the description of the """ create_counter_signature = signature(Meter.create_counter) self.assertIn("name", create_counter_signature.parameters.keys()) self.assertIs( create_counter_signature.parameters["name"].default, Signature.empty, ) create_counter_signature = signature(Meter.create_counter) self.assertIn("unit", create_counter_signature.parameters.keys()) self.assertIs(create_counter_signature.parameters["unit"].default, "") create_counter_signature = signature(Meter.create_counter) self.assertIn( "description", create_counter_signature.parameters.keys() ) self.assertIs( create_counter_signature.parameters["description"].default, "" ) def test_counter_add_method(self): """ Test that the counter has an add method. Test that the add method returns None. Test that the add method accepts optional attributes. Test that the add method accepts the increment amount. Test that the add method accepts only positive amounts. """ self.assertTrue(hasattr(Counter, "add")) self.assertIsNone(NoOpCounter("name").add(1)) add_signature = signature(Counter.add) self.assertIn("attributes", add_signature.parameters.keys()) self.assertIs(add_signature.parameters["attributes"].default, None) self.assertIn("amount", add_signature.parameters.keys()) self.assertIs( add_signature.parameters["amount"].default, Signature.empty ) class TestObservableCounter(TestCase): def test_create_observable_counter(self): """ Test that the ObservableCounter can be created with create_observable_counter. """ def callback(): yield self.assertTrue( isinstance( NoOpMeter("name").create_observable_counter( "name", callbacks=[callback()] ), ObservableCounter, ) ) def test_api_observable_counter_abstract(self): """ Test that the API ObservableCounter is an abstract class. """ self.assertTrue(isabstract(ObservableCounter)) def test_create_observable_counter_api(self): """ Test that the API for creating a observable_counter accepts the name of the instrument. Test that the API for creating a observable_counter accepts a sequence of callbacks. Test that the API for creating a observable_counter accepts the unit of the instrument. 
        Test that the API for creating an observable_counter accepts the
        description of the instrument.
        """
        create_observable_counter_signature = signature(
            Meter.create_observable_counter
        )
        self.assertIn(
            "name", create_observable_counter_signature.parameters.keys()
        )
        self.assertIs(
            create_observable_counter_signature.parameters["name"].default,
            Signature.empty,
        )
        create_observable_counter_signature = signature(
            Meter.create_observable_counter
        )
        self.assertIn(
            "callbacks", create_observable_counter_signature.parameters.keys()
        )
        self.assertIs(
            create_observable_counter_signature.parameters[
                "callbacks"
            ].default,
            None,
        )
        create_observable_counter_signature = signature(
            Meter.create_observable_counter
        )
        self.assertIn(
            "unit", create_observable_counter_signature.parameters.keys()
        )
        self.assertIs(
            create_observable_counter_signature.parameters["unit"].default, ""
        )

        create_observable_counter_signature = signature(
            Meter.create_observable_counter
        )
        self.assertIn(
            "description",
            create_observable_counter_signature.parameters.keys(),
        )
        self.assertIs(
            create_observable_counter_signature.parameters[
                "description"
            ].default,
            "",
        )

    def test_observable_counter_generator(self):
        """
        Test that the API for creating an asynchronous counter accepts a
        generator.
        Test that the generator function reports an iterable of measurements.
        Test that there is a way to pass state to the generator.
        Test that the instrument accepts positive measurements.
        Test that the instrument does not accept negative measurements.
        """
        create_observable_counter_signature = signature(
            Meter.create_observable_counter
        )
        self.assertIn(
            "callbacks", create_observable_counter_signature.parameters.keys()
        )
        self.assertIs(
            create_observable_counter_signature.parameters["name"].default,
            Signature.empty,
        )


class TestHistogram(TestCase):
    def test_create_histogram(self):
        """
        Test that the Histogram can be created with create_histogram.
        """
        self.assertTrue(
            isinstance(NoOpMeter("name").create_histogram("name"), Histogram)
        )

    def test_api_histogram_abstract(self):
        """
        Test that the API Histogram is an abstract class.
        """
        self.assertTrue(isabstract(Histogram))

    def test_create_histogram_api(self):
        """
        Test that the API for creating a histogram accepts the name of the
        instrument.
        Test that the API for creating a histogram accepts the unit of the
        instrument.
        Test that the API for creating a histogram accepts the description of
        the instrument.
        """
        create_histogram_signature = signature(Meter.create_histogram)
        self.assertIn("name", create_histogram_signature.parameters.keys())
        self.assertIs(
            create_histogram_signature.parameters["name"].default,
            Signature.empty,
        )
        create_histogram_signature = signature(Meter.create_histogram)
        self.assertIn("unit", create_histogram_signature.parameters.keys())
        self.assertIs(
            create_histogram_signature.parameters["unit"].default, ""
        )
        create_histogram_signature = signature(Meter.create_histogram)
        self.assertIn(
            "description", create_histogram_signature.parameters.keys()
        )
        self.assertIs(
            create_histogram_signature.parameters["description"].default, ""
        )

    def test_histogram_record_method(self):
        """
        Test that the histogram has a record method.
        Test that the record method returns None.
        Test that the record method accepts optional attributes.
        Test that the record method accepts the amount to record.
""" self.assertTrue(hasattr(Histogram, "record")) self.assertIsNone(NoOpHistogram("name").record(1)) record_signature = signature(Histogram.record) self.assertIn("attributes", record_signature.parameters.keys()) self.assertIs(record_signature.parameters["attributes"].default, None) self.assertIn("amount", record_signature.parameters.keys()) self.assertIs( record_signature.parameters["amount"].default, Signature.empty ) self.assertIsNone(NoOpHistogram("name").record(1)) class TestGauge(TestCase): def test_create_gauge(self): """ Test that the Gauge can be created with create_gauge. """ self.assertTrue( isinstance(NoOpMeter("name").create_gauge("name"), _Gauge) ) def test_api_gauge_abstract(self): """ Test that the API Gauge is an abstract class. """ self.assertTrue(isabstract(_Gauge)) def test_create_gauge_api(self): """ Test that the API for creating a gauge accepts the name of the instrument. Test that the API for creating a gauge accepts a sequence of callbacks. Test that the API for creating a gauge accepts the unit of the instrument. Test that the API for creating a gauge accepts the description of the instrument """ create_gauge_signature = signature(Meter.create_gauge) self.assertIn("name", create_gauge_signature.parameters.keys()) self.assertIs( create_gauge_signature.parameters["name"].default, Signature.empty, ) create_gauge_signature = signature(Meter.create_gauge) create_gauge_signature = signature(Meter.create_gauge) self.assertIn("unit", create_gauge_signature.parameters.keys()) self.assertIs(create_gauge_signature.parameters["unit"].default, "") create_gauge_signature = signature(Meter.create_gauge) self.assertIn("description", create_gauge_signature.parameters.keys()) self.assertIs( create_gauge_signature.parameters["description"].default, "", ) class TestObservableGauge(TestCase): def test_create_observable_gauge(self): """ Test that the ObservableGauge can be created with create_observable_gauge. """ def callback(): yield self.assertTrue( isinstance( NoOpMeter("name").create_observable_gauge( "name", [callback()] ), ObservableGauge, ) ) def test_api_observable_gauge_abstract(self): """ Test that the API ObservableGauge is an abstract class. """ self.assertTrue(isabstract(ObservableGauge)) def test_create_observable_gauge_api(self): """ Test that the API for creating a observable_gauge accepts the name of the instrument. Test that the API for creating a observable_gauge accepts a sequence of callbacks. Test that the API for creating a observable_gauge accepts the unit of the instrument. 
        Test that the API for creating an observable_gauge accepts the
        description of the instrument.
        """
        create_observable_gauge_signature = signature(
            Meter.create_observable_gauge
        )
        self.assertIn(
            "name", create_observable_gauge_signature.parameters.keys()
        )
        self.assertIs(
            create_observable_gauge_signature.parameters["name"].default,
            Signature.empty,
        )
        create_observable_gauge_signature = signature(
            Meter.create_observable_gauge
        )
        self.assertIn(
            "callbacks", create_observable_gauge_signature.parameters.keys()
        )
        self.assertIs(
            create_observable_gauge_signature.parameters["callbacks"].default,
            None,
        )
        create_observable_gauge_signature = signature(
            Meter.create_observable_gauge
        )
        self.assertIn(
            "unit", create_observable_gauge_signature.parameters.keys()
        )
        self.assertIs(
            create_observable_gauge_signature.parameters["unit"].default, ""
        )

        create_observable_gauge_signature = signature(
            Meter.create_observable_gauge
        )
        self.assertIn(
            "description", create_observable_gauge_signature.parameters.keys()
        )
        self.assertIs(
            create_observable_gauge_signature.parameters[
                "description"
            ].default,
            "",
        )

    def test_observable_gauge_callback(self):
        """
        Test that the API for creating an asynchronous gauge accepts a
        sequence of callbacks.
        Test that the callback function reports measurements.
        Test that there is a way to pass state to the callback.
        """
        create_observable_gauge_signature = signature(
            Meter.create_observable_gauge
        )
        self.assertIn(
            "callbacks", create_observable_gauge_signature.parameters.keys()
        )
        self.assertIs(
            create_observable_gauge_signature.parameters["name"].default,
            Signature.empty,
        )


class TestUpDownCounter(TestCase):
    def test_create_up_down_counter(self):
        """
        Test that the UpDownCounter can be created with
        create_up_down_counter.
        """
        self.assertTrue(
            isinstance(
                NoOpMeter("name").create_up_down_counter("name"),
                UpDownCounter,
            )
        )

    def test_api_up_down_counter_abstract(self):
        """
        Test that the API UpDownCounter is an abstract class.
        """
        self.assertTrue(isabstract(UpDownCounter))

    def test_create_up_down_counter_api(self):
        """
        Test that the API for creating an up_down_counter accepts the name of
        the instrument.
        Test that the API for creating an up_down_counter accepts the unit of
        the instrument.
        Test that the API for creating an up_down_counter accepts the
        description of the instrument.
        """
        create_up_down_counter_signature = signature(
            Meter.create_up_down_counter
        )
        self.assertIn(
            "name", create_up_down_counter_signature.parameters.keys()
        )
        self.assertIs(
            create_up_down_counter_signature.parameters["name"].default,
            Signature.empty,
        )
        create_up_down_counter_signature = signature(
            Meter.create_up_down_counter
        )
        self.assertIn(
            "unit", create_up_down_counter_signature.parameters.keys()
        )
        self.assertIs(
            create_up_down_counter_signature.parameters["unit"].default, ""
        )
        create_up_down_counter_signature = signature(
            Meter.create_up_down_counter
        )
        self.assertIn(
            "description", create_up_down_counter_signature.parameters.keys()
        )
        self.assertIs(
            create_up_down_counter_signature.parameters["description"].default,
            "",
        )

    def test_up_down_counter_add_method(self):
        """
        Test that the up_down_counter has an add method.
        Test that the add method returns None.
        Test that the add method accepts optional attributes.
        Test that the add method accepts the increment or decrement amount.
        Test that the add method accepts positive and negative amounts.
""" self.assertTrue(hasattr(UpDownCounter, "add")) self.assertIsNone(NoOpUpDownCounter("name").add(1)) add_signature = signature(UpDownCounter.add) self.assertIn("attributes", add_signature.parameters.keys()) self.assertIs(add_signature.parameters["attributes"].default, None) self.assertIn("amount", add_signature.parameters.keys()) self.assertIs( add_signature.parameters["amount"].default, Signature.empty ) class TestObservableUpDownCounter(TestCase): # pylint: disable=protected-access def test_create_observable_up_down_counter(self): """ Test that the ObservableUpDownCounter can be created with create_observable_up_down_counter. """ def callback(): yield self.assertTrue( isinstance( NoOpMeter("name").create_observable_up_down_counter( "name", [callback()] ), ObservableUpDownCounter, ) ) def test_api_observable_up_down_counter_abstract(self): """ Test that the API ObservableUpDownCounter is an abstract class. """ self.assertTrue(isabstract(ObservableUpDownCounter)) def test_create_observable_up_down_counter_api(self): """ Test that the API for creating a observable_up_down_counter accepts the name of the instrument. Test that the API for creating a observable_up_down_counter accepts a sequence of callbacks. Test that the API for creating a observable_up_down_counter accepts the unit of the instrument. Test that the API for creating a observable_up_down_counter accepts the description of the instrument """ create_observable_up_down_counter_signature = signature( Meter.create_observable_up_down_counter ) self.assertIn( "name", create_observable_up_down_counter_signature.parameters.keys(), ) self.assertIs( create_observable_up_down_counter_signature.parameters[ "name" ].default, Signature.empty, ) create_observable_up_down_counter_signature = signature( Meter.create_observable_up_down_counter ) self.assertIn( "callbacks", create_observable_up_down_counter_signature.parameters.keys(), ) self.assertIs( create_observable_up_down_counter_signature.parameters[ "callbacks" ].default, None, ) create_observable_up_down_counter_signature = signature( Meter.create_observable_up_down_counter ) self.assertIn( "unit", create_observable_up_down_counter_signature.parameters.keys(), ) self.assertIs( create_observable_up_down_counter_signature.parameters[ "unit" ].default, "", ) create_observable_up_down_counter_signature = signature( Meter.create_observable_up_down_counter ) self.assertIn( "description", create_observable_up_down_counter_signature.parameters.keys(), ) self.assertIs( create_observable_up_down_counter_signature.parameters[ "description" ].default, "", ) def test_observable_up_down_counter_callback(self): """ Test that the API for creating a asynchronous up_down_counter accepts a sequence of callbacks. Test that the callback function reports measurements. Test that there is a way to pass state to the callback. Test that the instrument accepts positive and negative values. 
""" create_observable_up_down_counter_signature = signature( Meter.create_observable_up_down_counter ) self.assertIn( "callbacks", create_observable_up_down_counter_signature.parameters.keys(), ) self.assertIs( create_observable_up_down_counter_signature.parameters[ "name" ].default, Signature.empty, ) def test_name_check(self): instrument = ChildInstrument("name") self.assertEqual( instrument._check_name_unit_description( "a" * 255, "unit", "description" )["name"], "a" * 255, ) self.assertEqual( instrument._check_name_unit_description( "a.", "unit", "description" )["name"], "a.", ) self.assertEqual( instrument._check_name_unit_description( "a-", "unit", "description" )["name"], "a-", ) self.assertEqual( instrument._check_name_unit_description( "a_", "unit", "description" )["name"], "a_", ) self.assertEqual( instrument._check_name_unit_description( "a/", "unit", "description" )["name"], "a/", ) # the old max length self.assertIsNotNone( instrument._check_name_unit_description( "a" * 64, "unit", "description" )["name"] ) self.assertIsNone( instrument._check_name_unit_description( "a" * 256, "unit", "description" )["name"] ) self.assertIsNone( instrument._check_name_unit_description( "Ñ", "unit", "description" )["name"] ) self.assertIsNone( instrument._check_name_unit_description( "_a", "unit", "description" )["name"] ) self.assertIsNone( instrument._check_name_unit_description( "1a", "unit", "description" )["name"] ) self.assertIsNone( instrument._check_name_unit_description("", "unit", "description")[ "name" ] ) def test_unit_check(self): instrument = ChildInstrument("name") self.assertEqual( instrument._check_name_unit_description( "name", "a" * 63, "description" )["unit"], "a" * 63, ) self.assertEqual( instrument._check_name_unit_description( "name", "{a}", "description" )["unit"], "{a}", ) self.assertIsNone( instrument._check_name_unit_description( "name", "a" * 64, "description" )["unit"] ) self.assertIsNone( instrument._check_name_unit_description( "name", "Ñ", "description" )["unit"] ) self.assertEqual( instrument._check_name_unit_description( "name", None, "description" )["unit"], "", ) def test_description_check(self): instrument = ChildInstrument("name") self.assertEqual( instrument._check_name_unit_description( "name", "unit", "description" )["description"], "description", ) self.assertEqual( instrument._check_name_unit_description("name", "unit", None)[ "description" ], "", ) python-opentelemetry-1.39.1/opentelemetry-api/tests/metrics/test_meter.py000066400000000000000000000146761511654350100270370ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore from logging import WARNING from unittest import TestCase from unittest.mock import Mock, patch from opentelemetry.metrics import Meter, NoOpMeter # FIXME Test that the meter methods can be called concurrently safely. 
class ChildMeter(Meter): # pylint: disable=signature-differs def create_counter(self, name, unit="", description=""): super().create_counter(name, unit=unit, description=description) def create_up_down_counter(self, name, unit="", description=""): super().create_up_down_counter( name, unit=unit, description=description ) def create_observable_counter( self, name, callbacks, unit="", description="" ): super().create_observable_counter( name, callbacks, unit=unit, description=description, ) def create_histogram( self, name, unit="", description="", *, explicit_bucket_boundaries_advisory=None, ): super().create_histogram( name, unit=unit, description=description, explicit_bucket_boundaries_advisory=explicit_bucket_boundaries_advisory, ) def create_gauge(self, name, unit="", description=""): super().create_gauge(name, unit=unit, description=description) def create_observable_gauge( self, name, callbacks, unit="", description="" ): super().create_observable_gauge( name, callbacks, unit=unit, description=description, ) def create_observable_up_down_counter( self, name, callbacks, unit="", description="" ): super().create_observable_up_down_counter( name, callbacks, unit=unit, description=description, ) class TestMeter(TestCase): # pylint: disable=no-member # TODO: convert to assertNoLogs instead of mocking logger when 3.10 is baseline @patch("opentelemetry.metrics._internal._logger") def test_repeated_instrument_names(self, logger_mock): try: test_meter = NoOpMeter("name") test_meter.create_counter("counter") test_meter.create_up_down_counter("up_down_counter") test_meter.create_observable_counter("observable_counter", Mock()) test_meter.create_histogram("histogram") test_meter.create_gauge("gauge") test_meter.create_observable_gauge("observable_gauge", Mock()) test_meter.create_observable_up_down_counter( "observable_up_down_counter", Mock() ) except Exception as error: # pylint: disable=broad-exception-caught self.fail(f"Unexpected exception raised {error}") for instrument_name in [ "counter", "up_down_counter", "histogram", "gauge", ]: getattr(test_meter, f"create_{instrument_name}")(instrument_name) logger_mock.warning.assert_not_called() for instrument_name in [ "observable_counter", "observable_gauge", "observable_up_down_counter", ]: getattr(test_meter, f"create_{instrument_name}")( instrument_name, Mock() ) logger_mock.warning.assert_not_called() def test_repeated_instrument_names_with_different_advisory(self): try: test_meter = NoOpMeter("name") test_meter.create_histogram( "histogram", explicit_bucket_boundaries_advisory=[1.0] ) except Exception as error: # pylint: disable=broad-exception-caught self.fail(f"Unexpected exception raised {error}") for instrument_name in [ "histogram", ]: with self.assertLogs(level=WARNING): getattr(test_meter, f"create_{instrument_name}")( instrument_name, ) def test_create_counter(self): """ Test that the meter provides a function to create a new Counter """ self.assertTrue(hasattr(Meter, "create_counter")) self.assertTrue(Meter.create_counter.__isabstractmethod__) def test_create_up_down_counter(self): """ Test that the meter provides a function to create a new UpDownCounter """ self.assertTrue(hasattr(Meter, "create_up_down_counter")) self.assertTrue(Meter.create_up_down_counter.__isabstractmethod__) def test_create_observable_counter(self): """ Test that the meter provides a function to create a new ObservableCounter """ self.assertTrue(hasattr(Meter, "create_observable_counter")) self.assertTrue(Meter.create_observable_counter.__isabstractmethod__) 
def test_create_histogram(self): """ Test that the meter provides a function to create a new Histogram """ self.assertTrue(hasattr(Meter, "create_histogram")) self.assertTrue(Meter.create_histogram.__isabstractmethod__) def test_create_gauge(self): """ Test that the meter provides a function to create a new Gauge """ self.assertTrue(hasattr(Meter, "create_gauge")) def test_create_observable_gauge(self): """ Test that the meter provides a function to create a new ObservableGauge """ self.assertTrue(hasattr(Meter, "create_observable_gauge")) self.assertTrue(Meter.create_observable_gauge.__isabstractmethod__) def test_create_observable_up_down_counter(self): """ Test that the meter provides a function to create a new ObservableUpDownCounter """ self.assertTrue(hasattr(Meter, "create_observable_up_down_counter")) self.assertTrue( Meter.create_observable_up_down_counter.__isabstractmethod__ ) python-opentelemetry-1.39.1/opentelemetry-api/tests/metrics/test_meter_provider.py000066400000000000000000000322221511654350100307340ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore # pylint: disable=protected-access from unittest import TestCase from unittest.mock import Mock, patch from pytest import fixture import opentelemetry.metrics._internal as metrics_internal from opentelemetry import metrics from opentelemetry.environment_variables import OTEL_PYTHON_METER_PROVIDER from opentelemetry.metrics import ( NoOpMeter, NoOpMeterProvider, get_meter_provider, set_meter_provider, ) from opentelemetry.metrics._internal import ( _ProxyMeter, _ProxyMeterProvider, get_meter, ) from opentelemetry.metrics._internal.instrument import ( _ProxyCounter, _ProxyGauge, _ProxyHistogram, _ProxyObservableCounter, _ProxyObservableGauge, _ProxyObservableUpDownCounter, _ProxyUpDownCounter, ) from opentelemetry.test.globals_test import ( MetricsGlobalsTest, reset_metrics_globals, ) # FIXME Test that the instrument methods can be called concurrently safely. 
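# Illustrative sketch (not executed here) of the global-provider flow these
# tests pin down: before set_meter_provider is called, the global getter
# hands out a _ProxyMeterProvider; afterwards it returns exactly the object
# that was set.
#
#     assert isinstance(get_meter_provider(), _ProxyMeterProvider)
#     provider = NoOpMeterProvider()
#     set_meter_provider(provider)
#     assert get_meter_provider() is provider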
@fixture def reset_meter_provider(): print(f"calling reset_metrics_globals() {reset_metrics_globals}") reset_metrics_globals() yield print("teardown - calling reset_metrics_globals()") reset_metrics_globals() # pylint: disable=redefined-outer-name def test_set_meter_provider(reset_meter_provider): """ Test that the API provides a way to set a global default MeterProvider """ mock = Mock() assert metrics_internal._METER_PROVIDER is None set_meter_provider(mock) assert metrics_internal._METER_PROVIDER is mock assert get_meter_provider() is mock def test_set_meter_provider_calls_proxy_provider(reset_meter_provider): with patch( "opentelemetry.metrics._internal._PROXY_METER_PROVIDER" ) as mock_proxy_mp: assert metrics_internal._PROXY_METER_PROVIDER is mock_proxy_mp mock_real_mp = Mock() set_meter_provider(mock_real_mp) mock_proxy_mp.on_set_meter_provider.assert_called_once_with( mock_real_mp ) def test_get_meter_provider(reset_meter_provider): """ Test that the API provides a way to get a global default MeterProvider """ assert metrics_internal._METER_PROVIDER is None assert isinstance(get_meter_provider(), _ProxyMeterProvider) metrics._METER_PROVIDER = None with patch.dict( "os.environ", {OTEL_PYTHON_METER_PROVIDER: "test_meter_provider"} ): with patch("opentelemetry.metrics._internal._load_provider", Mock()): with patch( "opentelemetry.metrics._internal.cast", Mock(**{"return_value": "test_meter_provider"}), ): assert get_meter_provider() == "test_meter_provider" class TestGetMeter(TestCase): def test_get_meter_parameters(self): """ Test that get_meter accepts name, version and schema_url """ try: NoOpMeterProvider().get_meter( "name", version="version", schema_url="schema_url" ) except Exception as error: # pylint: disable=broad-exception-caught self.fail(f"Unexpected exception raised: {error}") def test_invalid_name(self): """ Test that when an invalid name is specified a working meter implementation is returned as a fallback. Test that the fallback meter name property keeps its original invalid value. Test that a message is logged reporting the specified value for the fallback meter is invalid. """ meter = NoOpMeterProvider().get_meter("") self.assertTrue(isinstance(meter, NoOpMeter)) self.assertEqual(meter.name, "") meter = NoOpMeterProvider().get_meter(None) self.assertTrue(isinstance(meter, NoOpMeter)) self.assertEqual(meter.name, None) def test_get_meter_wrapper(self): """ `metrics._internal.get_meter` called with valid parameters and a NoOpMeterProvider should return a NoOpMeter with the same parameters. 
""" meter = get_meter( "name", version="version", meter_provider=NoOpMeterProvider(), schema_url="schema_url", attributes={"key": "value", "key2": 5, "key3": "value3"}, ) self.assertIsInstance(meter, NoOpMeter) self.assertEqual(meter.name, "name") self.assertEqual(meter.version, "version") self.assertEqual(meter.schema_url, "schema_url") class TestProxy(MetricsGlobalsTest, TestCase): def test_global_proxy_meter_provider(self): # Global get_meter_provider() should initially be a _ProxyMeterProvider # singleton proxy_meter_provider: _ProxyMeterProvider = get_meter_provider() self.assertIsInstance(proxy_meter_provider, _ProxyMeterProvider) self.assertIs(get_meter_provider(), proxy_meter_provider) def test_proxy_provider(self): proxy_meter_provider = _ProxyMeterProvider() # Should return a proxy meter when no real MeterProvider is set name = "foo" version = "1.2" schema_url = "schema_url" proxy_meter: _ProxyMeter = proxy_meter_provider.get_meter( name, version=version, schema_url=schema_url ) self.assertIsInstance(proxy_meter, _ProxyMeter) # After setting a real meter provider on the proxy, it should notify # it's _ProxyMeters which should create their own real Meters mock_real_mp = Mock() proxy_meter_provider.on_set_meter_provider(mock_real_mp) mock_real_mp.get_meter.assert_called_once_with( name, version, schema_url ) # After setting a real meter provider on the proxy, it should now return # new meters directly from the set real meter another_name = "bar" meter2 = proxy_meter_provider.get_meter(another_name) self.assertIsInstance(meter2, Mock) mock_real_mp.get_meter.assert_called_with(another_name, None, None) # pylint: disable=too-many-locals,too-many-statements def test_proxy_meter(self): meter_name = "foo" proxy_meter: _ProxyMeter = _ProxyMeterProvider().get_meter(meter_name) self.assertIsInstance(proxy_meter, _ProxyMeter) # Should be able to create proxy instruments name = "foo" unit = "s" description = "Foobar" callback = Mock() proxy_counter = proxy_meter.create_counter( name, unit=unit, description=description ) proxy_updowncounter = proxy_meter.create_up_down_counter( name, unit=unit, description=description ) proxy_histogram = proxy_meter.create_histogram( name, unit=unit, description=description ) proxy_gauge = proxy_meter.create_gauge( name, unit=unit, description=description ) proxy_observable_counter = proxy_meter.create_observable_counter( name, callbacks=[callback], unit=unit, description=description ) proxy_observable_updowncounter = ( proxy_meter.create_observable_up_down_counter( name, callbacks=[callback], unit=unit, description=description ) ) proxy_overvable_gauge = proxy_meter.create_observable_gauge( name, callbacks=[callback], unit=unit, description=description ) self.assertIsInstance(proxy_counter, _ProxyCounter) self.assertIsInstance(proxy_updowncounter, _ProxyUpDownCounter) self.assertIsInstance(proxy_histogram, _ProxyHistogram) self.assertIsInstance(proxy_gauge, _ProxyGauge) self.assertIsInstance( proxy_observable_counter, _ProxyObservableCounter ) self.assertIsInstance( proxy_observable_updowncounter, _ProxyObservableUpDownCounter ) self.assertIsInstance(proxy_overvable_gauge, _ProxyObservableGauge) # Synchronous proxy instruments should be usable amount = 12 attributes = {"foo": "bar"} proxy_counter.add(amount, attributes=attributes) proxy_updowncounter.add(amount, attributes=attributes) proxy_histogram.record(amount, attributes=attributes) proxy_gauge.set(amount, attributes=attributes) # Calling _ProxyMeterProvider.on_set_meter_provider() should cascade down # 
to the _ProxyInstruments which should create their own real instruments # from the real Meter to back their calls real_meter_provider = Mock() proxy_meter.on_set_meter_provider(real_meter_provider) real_meter_provider.get_meter.assert_called_once_with( meter_name, None, None ) real_meter: Mock = real_meter_provider.get_meter() real_meter.create_counter.assert_called_once_with( name, unit, description ) real_meter.create_up_down_counter.assert_called_once_with( name, unit, description ) real_meter.create_histogram.assert_called_once_with( name, unit, description, explicit_bucket_boundaries_advisory=None ) real_meter.create_gauge.assert_called_once_with( name, unit, description ) real_meter.create_observable_counter.assert_called_once_with( name, [callback], unit, description ) real_meter.create_observable_up_down_counter.assert_called_once_with( name, [callback], unit, description ) real_meter.create_observable_gauge.assert_called_once_with( name, [callback], unit, description ) # The synchronous instrument measurement methods should call through to # the real instruments real_counter: Mock = real_meter.create_counter() real_updowncounter: Mock = real_meter.create_up_down_counter() real_histogram: Mock = real_meter.create_histogram() real_gauge: Mock = real_meter.create_gauge() real_counter.assert_not_called() real_updowncounter.assert_not_called() real_histogram.assert_not_called() real_gauge.assert_not_called() proxy_counter.add(amount, attributes=attributes) real_counter.add.assert_called_once_with(amount, attributes, None) proxy_updowncounter.add(amount, attributes=attributes) real_updowncounter.add.assert_called_once_with( amount, attributes, None ) proxy_histogram.record(amount, attributes=attributes) real_histogram.record.assert_called_once_with(amount, attributes, None) proxy_gauge.set(amount, attributes=attributes) real_gauge.set.assert_called_once_with(amount, attributes, None) def test_proxy_meter_with_real_meter(self) -> None: # Creating new instruments on the _ProxyMeter with a real meter set # should create real instruments instead of proxies meter_name = "foo" proxy_meter: _ProxyMeter = _ProxyMeterProvider().get_meter(meter_name) self.assertIsInstance(proxy_meter, _ProxyMeter) real_meter_provider = Mock() proxy_meter.on_set_meter_provider(real_meter_provider) name = "foo" unit = "s" description = "Foobar" callback = Mock() counter = proxy_meter.create_counter( name, unit=unit, description=description ) updowncounter = proxy_meter.create_up_down_counter( name, unit=unit, description=description ) histogram = proxy_meter.create_histogram( name, unit=unit, description=description ) gauge = proxy_meter.create_gauge( name, unit=unit, description=description ) observable_counter = proxy_meter.create_observable_counter( name, callbacks=[callback], unit=unit, description=description ) observable_updowncounter = ( proxy_meter.create_observable_up_down_counter( name, callbacks=[callback], unit=unit, description=description ) ) observable_gauge = proxy_meter.create_observable_gauge( name, callbacks=[callback], unit=unit, description=description ) real_meter: Mock = real_meter_provider.get_meter() self.assertIs(counter, real_meter.create_counter()) self.assertIs(updowncounter, real_meter.create_up_down_counter()) self.assertIs(histogram, real_meter.create_histogram()) self.assertIs(gauge, real_meter.create_gauge()) self.assertIs( observable_counter, real_meter.create_observable_counter() ) self.assertIs( observable_updowncounter, real_meter.create_observable_up_down_counter(), ) 
self.assertIs(observable_gauge, real_meter.create_observable_gauge()) python-opentelemetry-1.39.1/opentelemetry-api/tests/metrics/test_observation.py000066400000000000000000000027711511654350100302470ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import TestCase from opentelemetry.metrics import Observation class TestObservation(TestCase): def test_measurement_init(self): try: # int Observation(321, {"hello": "world"}) # float Observation(321.321, {"hello": "world"}) except Exception: # pylint: disable=broad-exception-caught self.fail( "Unexpected exception raised when instantiating Observation" ) def test_measurement_equality(self): self.assertEqual( Observation(321, {"hello": "world"}), Observation(321, {"hello": "world"}), ) self.assertNotEqual( Observation(321, {"hello": "world"}), Observation(321.321, {"hello": "world"}), ) self.assertNotEqual( Observation(321, {"baz": "world"}), Observation(321, {"hello": "world"}), ) python-opentelemetry-1.39.1/opentelemetry-api/tests/metrics/test_subclass_instantiation.py000066400000000000000000000136471511654350100325030ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore # NOTE: The tests in this file are intended to test the semver compatibility of the public API. # Any tests that fail here indicate that the public API has changed in a way that is not backwards compatible. # Either bump the major version of the API, or make the necessary changes to the API to remain semver compatible. 
# pylint: disable=useless-parent-delegation,arguments-differ from typing import Optional from opentelemetry.metrics import ( Asynchronous, Counter, Histogram, Instrument, Meter, MeterProvider, ObservableCounter, ObservableGauge, ObservableUpDownCounter, Synchronous, UpDownCounter, _Gauge, ) class MeterProviderImplTest(MeterProvider): def get_meter( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, ) -> Meter: return super().get_meter(name, version, schema_url) def test_meter_provider_subclass_instantiation(): meter_provider = MeterProviderImplTest() assert isinstance(meter_provider, MeterProvider) class MeterImplTest(Meter): def create_counter(self, name, description, **kwargs): pass def create_up_down_counter(self, name, description, **kwargs): pass def create_observable_counter(self, name, description, **kwargs): pass def create_histogram(self, name, description, **kwargs): pass def create_observable_gauge(self, name, description, **kwargs): pass def create_observable_up_down_counter(self, name, description, **kwargs): pass def test_meter_subclass_instantiation(): meter = MeterImplTest("subclass_test") assert isinstance(meter, Meter) class SynchronousImplTest(Synchronous): def __init__( self, name: str, unit: str = "", description: str = "" ) -> None: super().__init__(name, unit, description) def test_synchronous_subclass_instantiation(): synchronous = SynchronousImplTest("subclass_test") assert isinstance(synchronous, Synchronous) class AsynchronousImplTest(Asynchronous): def __init__( self, name: str, unit: str = "", description: str = "" ) -> None: super().__init__(name, unit, description) def test_asynchronous_subclass_instantiation(): asynchronous = AsynchronousImplTest("subclass_test") assert isinstance(asynchronous, Asynchronous) class CounterImplTest(Counter): def __init__( self, name: str, unit: str = "", description: str = "" ) -> None: super().__init__(name, unit, description) def add(self, amount: int, **kwargs): pass def test_counter_subclass_instantiation(): counter = CounterImplTest("subclass_test") assert isinstance(counter, Counter) class UpDownCounterImplTest(UpDownCounter): def __init__( self, name: str, unit: str = "", description: str = "" ) -> None: super().__init__(name, unit, description) def add(self, amount: int, **kwargs): pass def test_up_down_counter_subclass_instantiation(): up_down_counter = UpDownCounterImplTest("subclass_test") assert isinstance(up_down_counter, UpDownCounter) class ObservableCounterImplTest(ObservableCounter): def __init__( self, name: str, unit: str = "", description: str = "" ) -> None: super().__init__(name, unit, description) def test_observable_counter_subclass_instantiation(): observable_counter = ObservableCounterImplTest("subclass_test") assert isinstance(observable_counter, ObservableCounter) class HistogramImplTest(Histogram): def __init__( self, name: str, unit: str = "", description: str = "" ) -> None: super().__init__(name, unit, description) def record(self, amount: int, **kwargs): pass def test_histogram_subclass_instantiation(): histogram = HistogramImplTest("subclass_test") assert isinstance(histogram, Histogram) class GaugeImplTest(_Gauge): def __init__( self, name: str, unit: str = "", description: str = "" ) -> None: super().__init__(name, unit, description) def set(self, amount: int, **kwargs): pass def test_gauge_subclass_instantiation(): gauge = GaugeImplTest("subclass_test") assert isinstance(gauge, _Gauge) class InstrumentImplTest(Instrument): def __init__( self, name: str, unit: 
str = "", description: str = "" ) -> None: super().__init__(name, unit, description) def test_instrument_subclass_instantiation(): instrument = InstrumentImplTest("subclass_test") assert isinstance(instrument, Instrument) class ObservableGaugeImplTest(ObservableGauge): def __init__( self, name: str, unit: str = "", description: str = "" ) -> None: super().__init__(name, unit, description) def test_observable_gauge_subclass_instantiation(): observable_gauge = ObservableGaugeImplTest("subclass_test") assert isinstance(observable_gauge, ObservableGauge) class ObservableUpDownCounterImplTest(ObservableUpDownCounter): def __init__( self, name: str, unit: str = "", description: str = "" ) -> None: super().__init__(name, unit, description) def test_observable_up_down_counter_subclass_instantiation(): observable_up_down_counter = ObservableUpDownCounterImplTest( "subclass_test" ) assert isinstance(observable_up_down_counter, ObservableUpDownCounter) python-opentelemetry-1.39.1/opentelemetry-api/tests/mypysmoke.py000066400000000000000000000013441511654350100252370ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import opentelemetry.trace def dummy_check_mypy_returntype() -> opentelemetry.trace.TracerProvider: return opentelemetry.trace.get_tracer_provider() python-opentelemetry-1.39.1/opentelemetry-api/tests/propagators/000077500000000000000000000000001511654350100251675ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/propagators/test_composite.py000066400000000000000000000104231511654350100306020ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# type: ignore import unittest from unittest.mock import Mock from opentelemetry.propagators.composite import CompositePropagator def get_as_list(dict_object, key): value = dict_object.get(key) return [value] if value is not None else [] def mock_inject(name, value="data"): def wrapped(carrier=None, context=None, setter=None): carrier[name] = value setter.set({}, f"inject_field_{name}_0", None) setter.set({}, f"inject_field_{name}_1", None) return wrapped def mock_extract(name, value="context"): def wrapped(carrier=None, context=None, getter=None): new_context = context.copy() new_context[name] = value return new_context return wrapped def mock_fields(name): return {f"inject_field_{name}_0", f"inject_field_{name}_1"} class TestCompositePropagator(unittest.TestCase): @classmethod def setUpClass(cls): cls.mock_propagator_0 = Mock( inject=mock_inject("mock-0"), extract=mock_extract("mock-0"), fields=mock_fields("mock-0"), ) cls.mock_propagator_1 = Mock( inject=mock_inject("mock-1"), extract=mock_extract("mock-1"), fields=mock_fields("mock-1"), ) cls.mock_propagator_2 = Mock( inject=mock_inject("mock-0", value="data2"), extract=mock_extract("mock-0", value="context2"), fields=mock_fields("mock-0"), ) def test_no_propagators(self): propagator = CompositePropagator([]) new_carrier = {} propagator.inject(new_carrier) self.assertEqual(new_carrier, {}) context = propagator.extract( carrier=new_carrier, context={}, getter=get_as_list ) self.assertEqual(context, {}) def test_single_propagator(self): propagator = CompositePropagator([self.mock_propagator_0]) new_carrier = {} propagator.inject(new_carrier) self.assertEqual(new_carrier, {"mock-0": "data"}) context = propagator.extract( carrier=new_carrier, context={}, getter=get_as_list ) self.assertEqual(context, {"mock-0": "context"}) def test_multiple_propagators(self): propagator = CompositePropagator( [self.mock_propagator_0, self.mock_propagator_1] ) new_carrier = {} propagator.inject(new_carrier) self.assertEqual(new_carrier, {"mock-0": "data", "mock-1": "data"}) context = propagator.extract( carrier=new_carrier, context={}, getter=get_as_list ) self.assertEqual(context, {"mock-0": "context", "mock-1": "context"}) def test_multiple_propagators_same_key(self): # test that when multiple propagators extract/inject the same # key, the later propagator values are extracted/injected propagator = CompositePropagator( [self.mock_propagator_0, self.mock_propagator_2] ) new_carrier = {} propagator.inject(new_carrier) self.assertEqual(new_carrier, {"mock-0": "data2"}) context = propagator.extract( carrier=new_carrier, context={}, getter=get_as_list ) self.assertEqual(context, {"mock-0": "context2"}) def test_fields(self): propagator = CompositePropagator( [ self.mock_propagator_0, self.mock_propagator_1, self.mock_propagator_2, ] ) mock_setter = Mock() propagator.inject({}, setter=mock_setter) inject_fields = set() for mock_call in mock_setter.mock_calls: inject_fields.add(mock_call[1][1]) self.assertEqual(inject_fields, propagator.fields) python-opentelemetry-1.39.1/opentelemetry-api/tests/propagators/test_global_httptextformat.py000066400000000000000000000047061511654350100332240ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore import unittest from opentelemetry import baggage, trace from opentelemetry.propagate import extract, inject from opentelemetry.trace import get_current_span, set_span_in_context from opentelemetry.trace.span import format_span_id, format_trace_id class TestDefaultGlobalPropagator(unittest.TestCase): """Test ensures the default global composite propagator works as intended""" TRACE_ID = int("12345678901234567890123456789012", 16) # type:int SPAN_ID = int("1234567890123456", 16) # type:int def test_propagation(self): traceparent_value = ( f"00-{format_trace_id(self.TRACE_ID)}-" f"{format_span_id(self.SPAN_ID)}-00" ) tracestate_value = "foo=1,bar=2,baz=3" headers = { "baggage": ["key1=val1,key2=val2"], "traceparent": [traceparent_value], "tracestate": [tracestate_value], } ctx = extract(headers) baggage_entries = baggage.get_all(context=ctx) expected = {"key1": "val1", "key2": "val2"} self.assertEqual(baggage_entries, expected) span_context = get_current_span(context=ctx).get_span_context() self.assertEqual(span_context.trace_id, self.TRACE_ID) self.assertEqual(span_context.span_id, self.SPAN_ID) span = trace.NonRecordingSpan(span_context) ctx = baggage.set_baggage("key3", "val3") ctx = baggage.set_baggage("key4", "val4", context=ctx) ctx = set_span_in_context(span, context=ctx) output = {} inject(output, context=ctx) self.assertEqual(traceparent_value, output["traceparent"]) self.assertIn("key3=val3", output["baggage"]) self.assertIn("key4=val4", output["baggage"]) self.assertIn("foo=1", output["tracestate"]) self.assertIn("bar=2", output["tracestate"]) self.assertIn("baz=3", output["tracestate"]) python-opentelemetry-1.39.1/opentelemetry-api/tests/propagators/test_propagators.py000066400000000000000000000244411511654350100311460ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
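#
# Context for the tests below: opentelemetry.propagate reads the
# OTEL_PROPAGATORS environment variable at import time, which is why each
# test patches the environment and then reloads the module. Illustrative
# values, mirroring the cases exercised here:
#
#     OTEL_PROPAGATORS unset       -> tracecontext + baggage (the default)
#     OTEL_PROPAGATORS="None"      -> an empty propagator list
#     OTEL_PROPAGATORS="a, b, c"   -> each name resolved through the
#                                     "opentelemetry_propagator"
#                                     entry-point group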
# type: ignore from importlib import reload from os import environ from unittest import TestCase from unittest.mock import Mock, patch from opentelemetry import trace from opentelemetry.baggage.propagation import W3CBaggagePropagator from opentelemetry.context.context import Context from opentelemetry.environment_variables import OTEL_PROPAGATORS from opentelemetry.trace.propagation.tracecontext import ( TraceContextTextMapPropagator, ) class TestPropagators(TestCase): @patch("opentelemetry.propagators.composite.CompositePropagator") def test_default_composite_propagators(self, mock_compositehttppropagator): def test_propagators(propagators): propagators = {propagator.__class__ for propagator in propagators} self.assertEqual(len(propagators), 2) self.assertEqual( propagators, {TraceContextTextMapPropagator, W3CBaggagePropagator}, ) mock_compositehttppropagator.configure_mock( **{"side_effect": test_propagators} ) # pylint: disable=import-outside-toplevel import opentelemetry.propagate # noqa: PLC0415 reload(opentelemetry.propagate) @patch.dict(environ, {OTEL_PROPAGATORS: "None"}) @patch("opentelemetry.propagators.composite.CompositePropagator") def test_none_propagators(self, mock_compositehttppropagator): def test_propagators(propagators): propagators = {propagator.__class__ for propagator in propagators} self.assertEqual(len(propagators), 0) self.assertEqual( propagators, set(), ) mock_compositehttppropagator.configure_mock( **{"side_effect": test_propagators} ) # pylint: disable=import-outside-toplevel import opentelemetry.propagate # noqa: PLC0415 reload(opentelemetry.propagate) @patch.dict(environ, {OTEL_PROPAGATORS: "tracecontext, None"}) @patch("opentelemetry.propagators.composite.CompositePropagator") def test_multiple_propagators_with_none( self, mock_compositehttppropagator ): def test_propagators(propagators): propagators = {propagator.__class__ for propagator in propagators} self.assertEqual(len(propagators), 0) self.assertEqual( propagators, set(), ) mock_compositehttppropagator.configure_mock( **{"side_effect": test_propagators} ) # pylint: disable=import-outside-toplevel import opentelemetry.propagate # noqa: PLC0415 reload(opentelemetry.propagate) @patch.dict(environ, {OTEL_PROPAGATORS: "a, b, c "}) @patch("opentelemetry.propagators.composite.CompositePropagator") @patch("opentelemetry.util._importlib_metadata.entry_points") def test_non_default_propagators( self, mock_entry_points, mock_compositehttppropagator ): mock_entry_points.configure_mock( **{ "side_effect": [ [ Mock( **{ "load.return_value": Mock( **{"return_value": "a"} ) } ), ], [ Mock( **{ "load.return_value": Mock( **{"return_value": "b"} ) } ) ], [ Mock( **{ "load.return_value": Mock( **{"return_value": "c"} ) } ) ], ] } ) def test_propagators(propagators): self.assertEqual(propagators, ["a", "b", "c"]) mock_compositehttppropagator.configure_mock( **{"side_effect": test_propagators} ) # pylint: disable=import-outside-toplevel import opentelemetry.propagate # noqa: PLC0415 reload(opentelemetry.propagate) @patch.dict( environ, {OTEL_PROPAGATORS: "tracecontext , unknown , baggage"} ) def test_composite_propagators_error(self): with self.assertRaises(ValueError) as cm: # pylint: disable=import-outside-toplevel import opentelemetry.propagate # noqa: PLC0415 reload(opentelemetry.propagate) self.assertEqual( str(cm.exception), "Propagator unknown not found.
It is either misspelled or not installed.", ) class TestTraceContextTextMapPropagator(TestCase): def setUp(self): self.propagator = TraceContextTextMapPropagator() def traceparent_helper( self, carrier, ): # We purposefully start with an empty context so we can test later if anything is added to it. initial_context = Context() context = self.propagator.extract(carrier, context=initial_context) self.assertIsNotNone(context) self.assertIsInstance(context, Context) return context def traceparent_helper_generator( self, version=0x00, trace_id=0x00000000000000000000000000000001, span_id=0x0000000000000001, trace_flags=0x00, suffix="", ): traceparent = f"{version:02x}-{trace_id:032x}-{span_id:016x}-{trace_flags:02x}{suffix}" carrier = {"traceparent": traceparent} return self.traceparent_helper(carrier) def valid_traceparent_helper( self, version=0x00, trace_id=0x00000000000000000000000000000001, span_id=0x0000000000000001, trace_flags=0x00, suffix="", assert_context_msg="A valid traceparent was provided, so the context should be non-empty.", ): context = self.traceparent_helper_generator( version=version, trace_id=trace_id, span_id=span_id, trace_flags=trace_flags, suffix=suffix, ) self.assertNotEqual( context, Context(), assert_context_msg, ) span = trace.get_current_span(context) self.assertIsNotNone(span) self.assertIsInstance(span, trace.span.Span) span_context = span.get_span_context() self.assertIsNotNone(span_context) self.assertIsInstance(span_context, trace.span.SpanContext) # Note: No version in SpanContext, it is only used locally in TraceContextTextMapPropagator self.assertEqual(span_context.trace_id, trace_id) self.assertEqual(span_context.span_id, span_id) self.assertEqual(span_context.trace_flags, trace_flags) self.assertIsInstance(span_context.trace_state, trace.TraceState) self.assertCountEqual(span_context.trace_state, []) self.assertEqual(span_context.is_remote, True) return context, span, span_context def invalid_traceparent_helper( self, version=0x00, trace_id=0x00000000000000000000000000000001, span_id=0x0000000000000001, trace_flags=0x00, suffix="", assert_context_msg="An invalid traceparent was provided, so the context should still be empty.", ): context = self.traceparent_helper_generator( version=version, trace_id=trace_id, span_id=span_id, trace_flags=trace_flags, suffix=suffix, ) self.assertEqual( context, Context(), assert_context_msg, ) return context def test_extract_nothing(self): context = self.traceparent_helper(carrier={}) self.assertEqual( context, {}, "We didn't provide a valid traceparent, so we should still have an empty Context.", ) def test_extract_simple_traceparent(self): self.valid_traceparent_helper() # https://www.w3.org/TR/trace-context/#version def test_extract_version_forbidden_ff(self): self.invalid_traceparent_helper( version=0xFF, assert_context_msg="We provided an invalid traceparent with a forbidden version=0xff, so the context should still be empty.", ) # https://www.w3.org/TR/trace-context/#version-format def test_extract_version_00_with_unsupported_suffix(self): self.invalid_traceparent_helper( suffix="-f00", assert_context_msg="We provided an invalid traceparent with version=0x00 and suffix information which is not supported in this version, so the context should still be empty.", ) # https://www.w3.org/TR/trace-context/#versioning-of-traceparent # See the parsing of the sampled bit of flags.
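    # Illustrative example of the case exercised next (the header value is
    # hypothetical): a future-version traceparent such as
    #
    #     "99-00000000000000000000000000000001-0000000000000001-00-f00"
    #
    # must still be accepted by a version-00 parser, which reads the four
    # leading fields and ignores the unknown "-f00" suffix.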
def test_extract_future_version_with_future_suffix_data(self): self.valid_traceparent_helper( version=0x99, suffix="-f00", assert_context_msg="We provided a traceparent that is possibly valid in the future with version=0x99 and suffix information, so the context should be non-empty.", ) # https://www.w3.org/TR/trace-context/#trace-id def test_extract_trace_id_invalid_all_zeros(self): self.invalid_traceparent_helper(trace_id=0) # https://www.w3.org/TR/trace-context/#parent-id def test_extract_span_id_invalid_all_zeros(self): self.invalid_traceparent_helper(span_id=0) def test_extract_non_decimal_trace_flags(self): self.valid_traceparent_helper(trace_flags=0xA0) python-opentelemetry-1.39.1/opentelemetry-api/tests/propagators/test_w3cbaggagepropagator.py000066400000000000000000000217001511654350100326710ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # type: ignore from logging import WARNING from unittest import TestCase from unittest.mock import Mock, patch from opentelemetry.baggage import get_all, set_baggage from opentelemetry.baggage.propagation import ( W3CBaggagePropagator, _format_baggage, ) from opentelemetry.context import get_current class TestW3CBaggagePropagator(TestCase): # pylint: disable=protected-access # pylint: disable=too-many-public-methods def setUp(self): self.propagator = W3CBaggagePropagator() def _extract(self, header_value): """Test helper""" header = {"baggage": [header_value]} return get_all(self.propagator.extract(header)) def _inject(self, values): """Test helper""" ctx = get_current() for k, v in values.items(): # pylint: disable=invalid-name ctx = set_baggage(k, v, context=ctx) output = {} self.propagator.inject(output, context=ctx) return output.get("baggage") def test_no_context_header(self): baggage_entries = get_all(self.propagator.extract({})) self.assertEqual(baggage_entries, {}) def test_empty_context_header(self): header = "" self.assertEqual(self._extract(header), {}) def test_valid_header(self): header = "key1=val1,key2=val2" expected = {"key1": "val1", "key2": "val2"} self.assertEqual(self._extract(header), expected) def test_invalid_header_with_space(self): header = "key1 = val1, key2 =val2 " self.assertEqual(self._extract(header), {}) def test_valid_header_with_properties(self): header = "key1=val1,key2=val2;prop=1;prop2;prop3=2" expected = {"key1": "val1", "key2": "val2;prop=1;prop2;prop3=2"} self.assertEqual(self._extract(header), expected) def test_valid_header_with_url_escaped_values(self): header = "key1=val1,key2=val2%3Aval3,key3=val4%40%23%24val5" expected = { "key1": "val1", "key2": "val2:val3", "key3": "val4@#$val5", } self.assertEqual(self._extract(header), expected) def test_header_with_invalid_value(self): header = "key1=val1,key2=val2,a,val3" with self.assertLogs(level=WARNING) as warning: self._extract(header) self.assertIn( "Baggage list-member `a` doesn't match the format", warning.output[0], ) def test_valid_header_with_empty_value(self): header = "key1=,key2=val2" expected = {"key1":
"", "key2": "val2"} self.assertEqual(self._extract(header), expected) def test_invalid_header(self): self.assertEqual(self._extract("header1"), {}) self.assertEqual(self._extract(" = "), {}) def test_header_too_long(self): long_value = "s" * (W3CBaggagePropagator._MAX_HEADER_LENGTH + 1) header = f"key1={long_value}" expected = {} self.assertEqual(self._extract(header), expected) def test_header_contains_too_many_entries(self): header = ",".join( [f"key{k}=val" for k in range(W3CBaggagePropagator._MAX_PAIRS + 1)] ) self.assertEqual( len(self._extract(header)), W3CBaggagePropagator._MAX_PAIRS ) def test_header_contains_pair_too_long(self): long_value = "s" * (W3CBaggagePropagator._MAX_PAIR_LENGTH + 1) header = f"key1=value1,key2={long_value},key3=value3" expected = {"key1": "value1", "key3": "value3"} with self.assertLogs(level=WARNING) as warning: self.assertEqual(self._extract(header), expected) self.assertIn( "exceeded the maximum number of bytes per list-member", warning.output[0], ) def test_extract_unquote_plus(self): self.assertEqual( self._extract("keykey=value%5Evalue"), {"keykey": "value^value"} ) self.assertEqual( self._extract("key%23key=value%23value"), {"key#key": "value#value"}, ) def test_header_max_entries_skip_invalid_entry(self): with self.assertLogs(level=WARNING) as warning: self.assertEqual( self._extract( ",".join( [ ( f"key{index}=value{index}" if index != 2 else ( f"key{index}=" f"value{'s' * (W3CBaggagePropagator._MAX_PAIR_LENGTH + 1)}" ) ) for index in range( W3CBaggagePropagator._MAX_PAIRS + 1 ) ] ) ), { f"key{index}": f"value{index}" for index in range(W3CBaggagePropagator._MAX_PAIRS + 1) if index != 2 }, ) self.assertIn( "exceeded the maximum number of list-members", warning.output[0], ) with self.assertLogs(level=WARNING) as warning: self.assertEqual( self._extract( ",".join( [ ( f"key{index}=value{index}" if index != 2 else f"key{index}xvalue{index}" ) for index in range( W3CBaggagePropagator._MAX_PAIRS + 1 ) ] ) ), { f"key{index}": f"value{index}" for index in range(W3CBaggagePropagator._MAX_PAIRS + 1) if index != 2 }, ) self.assertIn( "exceeded the maximum number of list-members", warning.output[0], ) def test_inject_no_baggage_entries(self): values = {} output = self._inject(values) self.assertEqual(None, output) def test_inject_space_entries(self): self.assertEqual("key=val+ue", self._inject({"key": "val ue"})) def test_inject(self): values = { "key1": "val1", "key2": "val2", } output = self._inject(values) self.assertIn("key1=val1", output) self.assertIn("key2=val2", output) def test_inject_escaped_values(self): values = { "key1": "val1,val2", "key2": "val3=4", } output = self._inject(values) self.assertIn("key2=val3%3D4", output) def test_inject_non_string_values(self): values = { "key1": True, "key2": 123, "key3": 123.567, } output = self._inject(values) self.assertIn("key1=True", output) self.assertIn("key2=123", output) self.assertIn("key3=123.567", output) @patch("opentelemetry.baggage.propagation.get_all") @patch("opentelemetry.baggage.propagation._format_baggage") def test_fields(self, mock_format_baggage, mock_baggage): mock_setter = Mock() self.propagator.inject({}, setter=mock_setter) inject_fields = set() for mock_call in mock_setter.mock_calls: inject_fields.add(mock_call[1][1]) self.assertEqual(inject_fields, self.propagator.fields) def test__format_baggage(self): self.assertEqual( _format_baggage({"key key": "value value"}), "key+key=value+value" ) self.assertEqual( _format_baggage({"key/key": "value/value"}), "key%2Fkey=value%2Fvalue", ) 
@patch("opentelemetry.baggage._BAGGAGE_KEY", new="abc") def test_inject_extract(self): carrier = {} context = set_baggage( "transaction", "string with spaces", context=get_current() ) self.propagator.inject(carrier, context) context = self.propagator.extract(carrier) self.assertEqual( carrier, {"baggage": "transaction=string+with+spaces"} ) self.assertEqual( context, {"abc": {"transaction": "string with spaces"}} ) python-opentelemetry-1.39.1/opentelemetry-api/tests/test_implementation.py000066400000000000000000000043271511654350100272720ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from opentelemetry import trace class TestAPIOnlyImplementation(unittest.TestCase): """ This test is in place to ensure the API is returning values that are valid. The same tests have been added to the SDK with different expected results. See issue for more details: https://github.com/open-telemetry/opentelemetry-python/issues/142 """ # TRACER def test_tracer(self): with self.assertRaises(TypeError): # pylint: disable=abstract-class-instantiated trace.TracerProvider() # type:ignore def test_default_tracer(self): tracer_provider = trace.NoOpTracerProvider() tracer = tracer_provider.get_tracer(__name__) with tracer.start_span("test") as span: self.assertEqual( span.get_span_context(), trace.INVALID_SPAN_CONTEXT ) self.assertEqual(span, trace.INVALID_SPAN) self.assertIs(span.is_recording(), False) with tracer.start_span("test2") as span2: self.assertEqual( span2.get_span_context(), trace.INVALID_SPAN_CONTEXT ) self.assertEqual(span2, trace.INVALID_SPAN) self.assertIs(span2.is_recording(), False) def test_span(self): with self.assertRaises(TypeError): # pylint: disable=abstract-class-instantiated trace.Span() # type:ignore def test_default_span(self): span = trace.NonRecordingSpan(trace.INVALID_SPAN_CONTEXT) self.assertEqual(span.get_span_context(), trace.INVALID_SPAN_CONTEXT) self.assertIs(span.is_recording(), False) python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/000077500000000000000000000000001511654350100237245ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/__init__.py000066400000000000000000000011101511654350100260260ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/propagation/000077500000000000000000000000001511654350100262475ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/propagation/test_textmap.py000066400000000000000000000025451511654350100313500ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore import unittest from opentelemetry.propagators.textmap import DefaultGetter class TestDefaultGetter(unittest.TestCase): def test_get_none(self): getter = DefaultGetter() carrier = {} val = getter.get(carrier, "test") self.assertIsNone(val) def test_get_str(self): getter = DefaultGetter() carrier = {"test": "val"} val = getter.get(carrier, "test") self.assertEqual(val, ["val"]) def test_get_iter(self): getter = DefaultGetter() carrier = {"test": ["val"]} val = getter.get(carrier, "test") self.assertEqual(val, ["val"]) def test_keys(self): getter = DefaultGetter() keys = getter.keys({"test": "val"}) self.assertEqual(keys, ["test"]) test_tracecontexthttptextformat.py000066400000000000000000000263261511654350100353330ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/propagation# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore import typing import unittest from unittest.mock import Mock, patch from opentelemetry import trace from opentelemetry.context import Context from opentelemetry.trace.propagation import tracecontext from opentelemetry.trace.span import TraceState FORMAT = tracecontext.TraceContextTextMapPropagator() class TestTraceContextFormat(unittest.TestCase): TRACE_ID = int("12345678901234567890123456789012", 16) # type:int SPAN_ID = int("1234567890123456", 16) # type:int def test_no_traceparent_header(self): """When tracecontext headers are not present, a new SpanContext should be created. RFC 4.2.2: If no traceparent header is received, the vendor creates a new trace-id and parent-id that represents the current request. """ output: typing.Dict[str, typing.List[str]] = {} span = trace.get_current_span(FORMAT.extract(output)) self.assertIsInstance(span.get_span_context(), trace.SpanContext) def test_headers_with_tracestate(self): """When there is a traceparent and tracestate header, data from both should be added to the SpanContext. 
""" traceparent_value = ( f"00-{format(self.TRACE_ID, '032x')}-" f"{format(self.SPAN_ID, '016x')}-00" ) tracestate_value = "foo=1,bar=2,baz=3" span_context = trace.get_current_span( FORMAT.extract( { "traceparent": [traceparent_value], "tracestate": [tracestate_value], }, ) ).get_span_context() self.assertEqual(span_context.trace_id, self.TRACE_ID) self.assertEqual(span_context.span_id, self.SPAN_ID) self.assertEqual( span_context.trace_state, {"foo": "1", "bar": "2", "baz": "3"} ) self.assertTrue(span_context.is_remote) output: typing.Dict[str, str] = {} span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) FORMAT.inject(output, context=ctx) self.assertEqual(output["traceparent"], traceparent_value) for pair in ["foo=1", "bar=2", "baz=3"]: self.assertIn(pair, output["tracestate"]) self.assertEqual(output["tracestate"].count(","), 2) def test_invalid_trace_id(self): """If the trace id is invalid, we must ignore the full traceparent header, and return a random, valid trace. Also ignore any tracestate. RFC 3.2.2.3 If the trace-id value is invalid (for example if it contains non-allowed characters or all zeros), vendors MUST ignore the traceparent. RFC 3.3 If the vendor failed to parse traceparent, it MUST NOT attempt to parse tracestate. Note that the opposite is not true: failure to parse tracestate MUST NOT affect the parsing of traceparent. """ span = trace.get_current_span( FORMAT.extract( { "traceparent": [ "00-00000000000000000000000000000000-1234567890123456-00" ], "tracestate": ["foo=1,bar=2,foo=3"], }, ) ) self.assertEqual(span.get_span_context(), trace.INVALID_SPAN_CONTEXT) def test_invalid_parent_id(self): """If the parent id is invalid, we must ignore the full traceparent header. Also ignore any tracestate. RFC 3.2.2.3 Vendors MUST ignore the traceparent when the parent-id is invalid (for example, if it contains non-lowercase hex characters). RFC 3.3 If the vendor failed to parse traceparent, it MUST NOT attempt to parse tracestate. Note that the opposite is not true: failure to parse tracestate MUST NOT affect the parsing of traceparent. """ span = trace.get_current_span( FORMAT.extract( { "traceparent": [ "00-00000000000000000000000000000000-0000000000000000-00" ], "tracestate": ["foo=1,bar=2,foo=3"], }, ) ) self.assertEqual(span.get_span_context(), trace.INVALID_SPAN_CONTEXT) def test_no_send_empty_tracestate(self): """If the tracestate is empty, do not set the header. RFC 3.3.1.1 Empty and whitespace-only list members are allowed. Vendors MUST accept empty tracestate headers but SHOULD avoid sending them. """ output: typing.Dict[str, str] = {} span = trace.NonRecordingSpan( trace.SpanContext(self.TRACE_ID, self.SPAN_ID, is_remote=False) ) ctx = trace.set_span_in_context(span) FORMAT.inject(output, context=ctx) self.assertTrue("traceparent" in output) self.assertFalse("tracestate" in output) def test_format_not_supported(self): """If the traceparent does not adhere to the supported format, discard it and create a new tracecontext. RFC 4.3 If the version cannot be parsed, return an invalid trace header. 
""" span = trace.get_current_span( FORMAT.extract( { "traceparent": [ "00-12345678901234567890123456789012-" "1234567890123456-00-residue" ], "tracestate": ["foo=1,bar=2,foo=3"], }, ) ) self.assertEqual(span.get_span_context(), trace.INVALID_SPAN_CONTEXT) def test_propagate_invalid_context(self): """Do not propagate invalid trace context.""" output: typing.Dict[str, str] = {} ctx = trace.set_span_in_context(trace.INVALID_SPAN) FORMAT.inject(output, context=ctx) self.assertFalse("traceparent" in output) def test_tracestate_empty_header(self): """Test tracestate with an additional empty header (should be ignored)""" span = trace.get_current_span( FORMAT.extract( { "traceparent": [ "00-12345678901234567890123456789012-1234567890123456-00" ], "tracestate": ["foo=1", ""], }, ) ) self.assertEqual(span.get_span_context().trace_state["foo"], "1") def test_tracestate_header_with_trailing_comma(self): """Do not propagate invalid trace context.""" span = trace.get_current_span( FORMAT.extract( { "traceparent": [ "00-12345678901234567890123456789012-1234567890123456-00" ], "tracestate": ["foo=1,"], }, ) ) self.assertEqual(span.get_span_context().trace_state["foo"], "1") def test_tracestate_keys(self): """Test for valid key patterns in the tracestate""" tracestate_value = ",".join( [ "1a-2f@foo=bar1", "1a-_*/2b@foo=bar2", "foo=bar3", "foo-_*/bar=bar4", ] ) span = trace.get_current_span( FORMAT.extract( { "traceparent": [ "00-12345678901234567890123456789012-" "1234567890123456-00" ], "tracestate": [tracestate_value], }, ) ) self.assertEqual( span.get_span_context().trace_state["1a-2f@foo"], "bar1" ) self.assertEqual( span.get_span_context().trace_state["1a-_*/2b@foo"], "bar2" ) self.assertEqual(span.get_span_context().trace_state["foo"], "bar3") self.assertEqual( span.get_span_context().trace_state["foo-_*/bar"], "bar4" ) @patch("opentelemetry.trace.INVALID_SPAN_CONTEXT") @patch("opentelemetry.trace.get_current_span") def test_fields(self, mock_get_current_span, mock_invalid_span_context): mock_get_current_span.configure_mock( return_value=Mock( **{ "get_span_context.return_value": Mock( **{ "trace_id": 1, "span_id": 2, "trace_flags": 3, "trace_state": TraceState([("a", "b")]), } ) } ) ) mock_setter = Mock() FORMAT.inject({}, setter=mock_setter) inject_fields = set() for mock_call in mock_setter.mock_calls: inject_fields.add(mock_call[1][1]) self.assertEqual(inject_fields, FORMAT.fields) def test_extract_no_trace_parent_to_explicit_ctx(self): carrier = {"tracestate": ["foo=1"]} orig_ctx = Context({"k1": "v1"}) ctx = FORMAT.extract(carrier, orig_ctx) self.assertDictEqual(orig_ctx, ctx) def test_extract_no_trace_parent_to_implicit_ctx(self): carrier = {"tracestate": ["foo=1"]} ctx = FORMAT.extract(carrier) self.assertDictEqual(Context(), ctx) def test_extract_invalid_trace_parent_to_explicit_ctx(self): trace_parent_headers = [ "invalid", "00-00000000000000000000000000000000-1234567890123456-00", "00-12345678901234567890123456789012-0000000000000000-00", "00-12345678901234567890123456789012-1234567890123456-00-residue", ] for trace_parent in trace_parent_headers: with self.subTest(trace_parent=trace_parent): carrier = { "traceparent": [trace_parent], "tracestate": ["foo=1"], } orig_ctx = Context({"k1": "v1"}) ctx = FORMAT.extract(carrier, orig_ctx) self.assertDictEqual(orig_ctx, ctx) def test_extract_invalid_trace_parent_to_implicit_ctx(self): trace_parent_headers = [ "invalid", "00-00000000000000000000000000000000-1234567890123456-00", "00-12345678901234567890123456789012-0000000000000000-00", 
"00-12345678901234567890123456789012-1234567890123456-00-residue", ] for trace_parent in trace_parent_headers: with self.subTest(trace_parent=trace_parent): carrier = { "traceparent": [trace_parent], "tracestate": ["foo=1"], } ctx = FORMAT.extract(carrier) self.assertDictEqual(Context(), ctx) python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/test_defaultspan.py000066400000000000000000000023401511654350100276420ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from opentelemetry import trace class TestNonRecordingSpan(unittest.TestCase): def test_ctor(self): context = trace.SpanContext( 1, 1, is_remote=False, trace_flags=trace.DEFAULT_TRACE_OPTIONS, trace_state=trace.DEFAULT_TRACE_STATE, ) span = trace.NonRecordingSpan(context) self.assertEqual(context, span.get_span_context()) def test_invalid_span(self): self.assertIsNotNone(trace.INVALID_SPAN) self.assertIsNotNone(trace.INVALID_SPAN.get_span_context()) self.assertFalse(trace.INVALID_SPAN.get_span_context().is_valid) python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/test_globals.py000066400000000000000000000152671511654350100267730ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import unittest from unittest.mock import Mock, patch from opentelemetry import context, trace from opentelemetry.test.concurrency_test import ConcurrencyTestBase, MockFunc from opentelemetry.test.globals_test import TraceGlobalsTest from opentelemetry.trace.status import Status, StatusCode class SpanTest(trace.NonRecordingSpan): has_ended = False recorded_exception = None recorded_status = Status(status_code=StatusCode.UNSET) def set_status(self, status, description=None): if isinstance(status, Status): self.recorded_status = status else: self.recorded_status = Status( status_code=status, description=description ) def end(self, end_time=None): self.has_ended = True def is_recording(self): return not self.has_ended def record_exception( self, exception, attributes=None, timestamp=None, escaped=False ): self.recorded_exception = exception class TestGlobals(TraceGlobalsTest, unittest.TestCase): @staticmethod @patch("opentelemetry.trace._TRACER_PROVIDER") def test_get_tracer(mock_tracer_provider): # type: ignore """trace.get_tracer should proxy to the global tracer provider.""" trace.get_tracer("foo", "var") mock_tracer_provider.get_tracer.assert_called_with( "foo", "var", None, None ) mock_provider = Mock() trace.get_tracer("foo", "var", mock_provider) mock_provider.get_tracer.assert_called_with("foo", "var", None, None) class TestGlobalsConcurrency(TraceGlobalsTest, ConcurrencyTestBase): @patch("opentelemetry.trace.logger") def test_set_tracer_provider_many_threads(self, mock_logger) -> None: # type: ignore mock_logger.warning = MockFunc() def do_concurrently() -> Mock: # first get a proxy tracer proxy_tracer = trace.ProxyTracerProvider().get_tracer("foo") # try to set the global tracer provider mock_tracer_provider = Mock(get_tracer=MockFunc()) trace.set_tracer_provider(mock_tracer_provider) # start a span through the proxy which will call through to the mock provider proxy_tracer.start_span("foo") return mock_tracer_provider num_threads = 100 mock_tracer_providers = self.run_with_many_threads( do_concurrently, num_threads=num_threads, ) # despite trying to set tracer provider many times, only one of the # mock_tracer_providers should have stuck and been called from # proxy_tracer.start_span() mock_tps_with_any_call = [ mock for mock in mock_tracer_providers if mock.get_tracer.call_count > 0 ] self.assertEqual(len(mock_tps_with_any_call), 1) self.assertEqual( mock_tps_with_any_call[0].get_tracer.call_count, num_threads ) # should have warned every time except for the successful set self.assertEqual(mock_logger.warning.call_count, num_threads - 1) class TestTracer(unittest.TestCase): def setUp(self): self.tracer = trace.NoOpTracer() def test_get_current_span(self): """NoOpTracer's start_span will also be retrievable via get_current_span """ self.assertEqual(trace.get_current_span(), trace.INVALID_SPAN) span = trace.NonRecordingSpan(trace.INVALID_SPAN_CONTEXT) ctx = trace.set_span_in_context(span) token = context.attach(ctx) try: self.assertIs(trace.get_current_span(), span) finally: context.detach(token) self.assertEqual(trace.get_current_span(), trace.INVALID_SPAN) class TestUseTracer(unittest.TestCase): def test_use_span(self): self.assertEqual(trace.get_current_span(), trace.INVALID_SPAN) span = trace.NonRecordingSpan(trace.INVALID_SPAN_CONTEXT) with trace.use_span(span): self.assertIs(trace.get_current_span(), span) self.assertEqual(trace.get_current_span(), trace.INVALID_SPAN) def test_use_span_end_on_exit(self): test_span = SpanTest(trace.INVALID_SPAN_CONTEXT) with 
trace.use_span(test_span): pass self.assertFalse(test_span.has_ended) with trace.use_span(test_span, end_on_exit=True): pass self.assertTrue(test_span.has_ended) def test_use_span_exception(self): class TestUseSpanException(Exception): pass test_span = SpanTest(trace.INVALID_SPAN_CONTEXT) exception = TestUseSpanException("test exception") with self.assertRaises(TestUseSpanException): with trace.use_span(test_span): raise exception self.assertEqual(test_span.recorded_exception, exception) def test_use_span_set_status(self): class TestUseSpanException(Exception): pass test_span = SpanTest(trace.INVALID_SPAN_CONTEXT) with self.assertRaises(TestUseSpanException): with trace.use_span(test_span): raise TestUseSpanException("test error") self.assertEqual( test_span.recorded_status.status_code, StatusCode.ERROR, ) self.assertEqual( test_span.recorded_status.description, "TestUseSpanException: test error", ) def test_use_span_base_exceptions(self): base_exception_classes = [ BaseException, GeneratorExit, SystemExit, KeyboardInterrupt, ] for exc_cls in base_exception_classes: with self.subTest(exc=exc_cls.__name__): test_span = SpanTest(trace.INVALID_SPAN_CONTEXT) with self.assertRaises(exc_cls): with trace.use_span(test_span): raise exc_cls() self.assertEqual( test_span.recorded_status.status_code, StatusCode.UNSET, ) self.assertIsNone(test_span.recorded_status.description) self.assertIsNone(test_span.recorded_exception) python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/test_immutablespancontext.py000066400000000000000000000042031511654350100316020ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
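#
# Context for the tests below: SpanContext is immutable. A minimal sketch
# of the behavior being verified (values are arbitrary):
#
#     from opentelemetry import trace
#
#     ctx = trace.SpanContext(1, 1, is_remote=False)
#     ctx.trace_id = 2          # silently discarded rather than applied
#     assert ctx.trace_id == 1  # the original value is preserved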
import unittest from opentelemetry import trace from opentelemetry.trace import TraceFlags, TraceState class TestImmutableSpanContext(unittest.TestCase): def test_ctor(self): context = trace.SpanContext( 1, 1, is_remote=False, trace_flags=trace.DEFAULT_TRACE_OPTIONS, trace_state=trace.DEFAULT_TRACE_STATE, ) self.assertEqual(context.trace_id, 1) self.assertEqual(context.span_id, 1) self.assertEqual(context.is_remote, False) self.assertEqual(context.trace_flags, trace.DEFAULT_TRACE_OPTIONS) self.assertEqual(context.trace_state, trace.DEFAULT_TRACE_STATE) def test_attempt_change_attributes(self): context = trace.SpanContext( 1, 2, is_remote=False, trace_flags=trace.DEFAULT_TRACE_OPTIONS, trace_state=trace.DEFAULT_TRACE_STATE, ) # attempt to change the attribute values context.trace_id = 2 # type: ignore context.span_id = 3 # type: ignore context.is_remote = True # type: ignore context.trace_flags = TraceFlags(3) # type: ignore context.trace_state = TraceState([("test", "test")]) # type: ignore # check if attributes changed self.assertEqual(context.trace_id, 1) self.assertEqual(context.span_id, 2) self.assertEqual(context.is_remote, False) self.assertEqual(context.trace_flags, trace.DEFAULT_TRACE_OPTIONS) self.assertEqual(context.trace_state, trace.DEFAULT_TRACE_STATE) python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/test_proxy.py000066400000000000000000000072541511654350100265260ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
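#
# Context for the tests below: tracers obtained before a real provider is
# configured are proxies that later delegate to it. A hedged sketch
# (RealProvider is a hypothetical TracerProvider implementation):
#
#     tracer = trace.get_tracer("early")          # ProxyTracer for now
#     trace.set_tracer_provider(RealProvider())   # configure afterwards
#     tracer.start_span("work")                   # served by RealProvider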
# pylint: disable=W0212,W0222,W0221 import typing import unittest from opentelemetry import trace from opentelemetry.test.globals_test import TraceGlobalsTest from opentelemetry.trace.span import ( INVALID_SPAN_CONTEXT, NonRecordingSpan, Span, ) from opentelemetry.util._decorator import _agnosticcontextmanager from opentelemetry.util.types import Attributes class TestProvider(trace.NoOpTracerProvider): def get_tracer( self, instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, attributes: typing.Optional[Attributes] = None, ) -> trace.Tracer: return TestTracer() class TestTracer(trace.NoOpTracer): def start_span(self, *args, **kwargs): return SpanTest(INVALID_SPAN_CONTEXT) @_agnosticcontextmanager # pylint: disable=protected-access def start_as_current_span(self, *args, **kwargs): # type: ignore with trace.use_span(self.start_span(*args, **kwargs)) as span: # type: ignore yield span class SpanTest(NonRecordingSpan): pass class TestProxy(TraceGlobalsTest, unittest.TestCase): def test_proxy_tracer(self): provider = trace.get_tracer_provider() # proxy provider self.assertIsInstance(provider, trace.ProxyTracerProvider) # provider returns proxy tracer tracer = provider.get_tracer("proxy-test") self.assertIsInstance(tracer, trace.ProxyTracer) with tracer.start_span("span1") as span: self.assertIsInstance(span, trace.NonRecordingSpan) with tracer.start_as_current_span("span2") as span: self.assertIsInstance(span, trace.NonRecordingSpan) # set a real provider trace.set_tracer_provider(TestProvider()) # get_tracer_provider() now returns the real provider self.assertIsInstance(trace.get_tracer_provider(), TestProvider) # tracer provider now returns real instance self.assertIsInstance(trace.get_tracer_provider(), TestProvider) # references to the old provider still work but return real tracer now real_tracer = provider.get_tracer("proxy-test") self.assertIsInstance(real_tracer, TestTracer) # reference to old proxy tracer now delegates to a real tracer and # creates real spans with tracer.start_span("") as span: self.assertIsInstance(span, SpanTest) def test_late_config(self): # get a tracer and instrument a function as we would at the # root of a module tracer = trace.get_tracer("test") @tracer.start_as_current_span("span") def my_function() -> Span: return trace.get_current_span() # call function before configuring tracing provider, should # return INVALID_SPAN from the NoOpTracer self.assertEqual(my_function(), trace.INVALID_SPAN) # configure tracing provider trace.set_tracer_provider(TestProvider()) # call function again, we should now be getting a TestSpan self.assertIsInstance(my_function(), SpanTest) python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/test_span_context.py000066400000000000000000000056501511654350100300500ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
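#
# Context for the tests below: a minimal pickle round-trip of the kind
# multiprocessing users rely on (a sketch of what is verified here):
#
#     import pickle
#     from opentelemetry import trace
#
#     sc = trace.SpanContext(1, 2, is_remote=False)
#     restored = pickle.loads(pickle.dumps(sc))
#     assert restored.trace_id == sc.trace_id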
import pickle import unittest from opentelemetry import trace class TestSpanContext(unittest.TestCase): def test_span_context_pickle(self): """ SpanContext needs to be pickleable to support multiprocessing, so that a span in a newly spawned process can start with it as its parent """ sc = trace.SpanContext( 1, 2, is_remote=False, trace_flags=trace.DEFAULT_TRACE_OPTIONS, trace_state=trace.DEFAULT_TRACE_STATE, ) pickle_sc = pickle.loads(pickle.dumps(sc)) self.assertEqual(sc.trace_id, pickle_sc.trace_id) self.assertEqual(sc.span_id, pickle_sc.span_id) invalid_sc = trace.SpanContext( 9999999999999999999999999999999999999999999999999999999999999999999999999999, 9, is_remote=False, trace_flags=trace.DEFAULT_TRACE_OPTIONS, trace_state=trace.DEFAULT_TRACE_STATE, ) self.assertFalse(invalid_sc.is_valid) def test_trace_id_validity(self): trace_id_max_value = int("f" * 32, 16) span_id = 1 # valid trace IDs sc = trace.SpanContext(trace_id_max_value, span_id, is_remote=False) self.assertTrue(sc.is_valid) sc = trace.SpanContext(1, span_id, is_remote=False) self.assertTrue(sc.is_valid) # invalid trace IDs sc = trace.SpanContext(0, span_id, is_remote=False) self.assertFalse(sc.is_valid) sc = trace.SpanContext(-1, span_id, is_remote=False) self.assertFalse(sc.is_valid) sc = trace.SpanContext( trace_id_max_value + 1, span_id, is_remote=False ) self.assertFalse(sc.is_valid) def test_span_id_validity(self): span_id_max = int("f" * 16, 16) trace_id = 1 # valid span IDs sc = trace.SpanContext(trace_id, span_id_max, is_remote=False) self.assertTrue(sc.is_valid) sc = trace.SpanContext(trace_id, 1, is_remote=False) self.assertTrue(sc.is_valid) # invalid span IDs sc = trace.SpanContext(trace_id, 0, is_remote=False) self.assertFalse(sc.is_valid) sc = trace.SpanContext(trace_id, -1, is_remote=False) self.assertFalse(sc.is_valid) sc = trace.SpanContext(trace_id, span_id_max + 1, is_remote=False) self.assertFalse(sc.is_valid) python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/test_status.py000066400000000000000000000054351511654350100266670ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
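#
# Context for the tests below: a Status description is only honored
# together with StatusCode.ERROR; for other codes it is dropped with a
# warning. Illustrative:
#
#     from opentelemetry.trace.status import Status, StatusCode
#
#     Status()                                 # UNSET, description is None
#     Status(StatusCode.ERROR, "unavailable")  # description is kept
#     Status(StatusCode.OK, "ignored")         # description becomes None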
import unittest from logging import WARNING from opentelemetry.trace.status import Status, StatusCode class TestStatus(unittest.TestCase): def test_constructor(self): status = Status() self.assertIs(status.status_code, StatusCode.UNSET) self.assertIsNone(status.description) status = Status(StatusCode.ERROR, "unavailable") self.assertIs(status.status_code, StatusCode.ERROR) self.assertEqual(status.description, "unavailable") def test_invalid_description(self): with self.assertLogs(level=WARNING) as warning: status = Status( status_code=StatusCode.ERROR, description={"test": "val"}, # type: ignore ) self.assertIs(status.status_code, StatusCode.ERROR) self.assertEqual(status.description, None) self.assertIn( "Invalid status description type, expected str", warning.output[0], # type: ignore ) def test_description_and_non_error_status(self): with self.assertLogs(level=WARNING) as warning: status = Status( status_code=StatusCode.OK, description="status description" ) self.assertIs(status.status_code, StatusCode.OK) self.assertEqual(status.description, None) self.assertIn( "description should only be set when status_code is set to StatusCode.ERROR", warning.output[0], # type: ignore ) with self.assertLogs(level=WARNING) as warning: status = Status( status_code=StatusCode.UNSET, description="status description" ) self.assertIs(status.status_code, StatusCode.UNSET) self.assertEqual(status.description, None) self.assertIn( "description should only be set when status_code is set to StatusCode.ERROR", warning.output[0], # type: ignore ) status = Status( status_code=StatusCode.ERROR, description="status description" ) self.assertIs(status.status_code, StatusCode.ERROR) self.assertEqual(status.description, "status description") python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/test_tracer.py000066400000000000000000000050701511654350100266170ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
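#
# Context for the tests below: start_as_current_span works both as a
# context manager and as a decorator, including on async functions. A
# hedged sketch (the tracer and function names are illustrative):
#
#     from opentelemetry import trace
#
#     tracer = trace.get_tracer(__name__)
#
#     with tracer.start_as_current_span("work"):
#         ...
#
#     @tracer.start_as_current_span("work")
#     async def handler() -> None:
#         ...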
import asyncio from unittest import TestCase from opentelemetry.trace import ( INVALID_SPAN, NoOpTracer, Span, Tracer, _agnosticcontextmanager, get_current_span, ) class TestTracer(TestCase): def setUp(self): self.tracer = NoOpTracer() def test_start_span(self): with self.tracer.start_span("") as span: self.assertIsInstance(span, Span) def test_start_as_current_span_context_manager(self): with self.tracer.start_as_current_span("") as span: self.assertIsInstance(span, Span) def test_start_as_current_span_decorator(self): # using a list to track the mock call order calls = [] class MockTracer(Tracer): def start_span(self, *args, **kwargs): return INVALID_SPAN @_agnosticcontextmanager # pylint: disable=protected-access def start_as_current_span(self, *args, **kwargs): # type: ignore calls.append(1) yield INVALID_SPAN calls.append(9) mock_tracer = MockTracer() # test 1 : sync function @mock_tracer.start_as_current_span("name") def function_sync(data: str) -> int: calls.append(5) return len(data) calls = [] res = function_sync("123") self.assertEqual(res, 3) self.assertEqual(calls, [1, 5, 9]) # test 2 : async function @mock_tracer.start_as_current_span("name") async def function_async(data: str) -> int: calls.append(5) return len(data) calls = [] res = asyncio.run(function_async("123")) self.assertEqual(res, 3) self.assertEqual(calls, [1, 5, 9]) def test_get_current_span(self): with self.tracer.start_as_current_span("test") as span: get_current_span().set_attribute("test", "test") self.assertEqual(span, INVALID_SPAN) self.assertFalse(hasattr(span, "attributes")) python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/test_tracestate.py000066400000000000000000000102741511654350100275000ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
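#
# Context for the tests below: TraceState is immutable; add(), update()
# and delete() all return new instances, and invalid keys or values leave
# the state unchanged. Illustrative:
#
#     from opentelemetry.trace.span import TraceState
#
#     state = TraceState([("a", "1")])
#     updated = state.update("a", "2")  # a new TraceState object
#     assert state.get("a") == "1" and updated.get("a") == "2"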
python-opentelemetry-1.39.1/opentelemetry-api/tests/trace/test_tracestate.py

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=no-member

import unittest

from opentelemetry.trace.span import TraceState


class TestTraceContextFormat(unittest.TestCase):
    def test_empty_tracestate(self):
        state = TraceState()
        self.assertEqual(len(state), 0)
        self.assertEqual(state.to_header(), "")

    def test_tracestate_valid_pairs(self):
        pairs = [("1a-2f@foo", "bar1"), ("foo-_*/bar", "bar4")]
        state = TraceState(pairs)
        self.assertEqual(len(state), 2)
        self.assertIsNotNone(state.get("foo-_*/bar"))
        self.assertEqual(state.get("foo-_*/bar"), "bar4")
        self.assertEqual(state.to_header(), "1a-2f@foo=bar1,foo-_*/bar=bar4")
        self.assertIsNone(state.get("random"))

    def test_tracestate_add_valid(self):
        state = TraceState()
        new_state = state.add("1a-2f@foo", "bar4")
        self.assertEqual(len(new_state), 1)
        self.assertEqual(new_state.get("1a-2f@foo"), "bar4")

    def test_tracestate_add_invalid(self):
        state = TraceState()
        new_state = state.add("%%%nsasa", "val")
        self.assertEqual(len(new_state), 0)
        new_state = new_state.add("key", "====val====")
        self.assertEqual(len(new_state), 0)
        self.assertEqual(new_state.to_header(), "")

    def test_tracestate_update_valid(self):
        state = TraceState([("a", "1")])
        new_state = state.update("a", "2")
        self.assertEqual(new_state.get("a"), "2")
        new_state = new_state.add("b", "3")
        self.assertNotEqual(state, new_state)

    def test_tracestate_update_invalid(self):
        state = TraceState([("a", "1")])
        new_state = state.update("a", "2=/")
        self.assertNotEqual(new_state.get("a"), "2=/")
        new_state = new_state.update("a", ",,2,,f")
        self.assertNotEqual(new_state.get("a"), ",,2,,f")
        self.assertEqual(new_state.get("a"), "1")

    def test_tracestate_delete_preserved(self):
        state = TraceState([("a", "1"), ("b", "2"), ("c", "3")])
        new_state = state.delete("b")
        self.assertIsNone(new_state.get("b"))
        entries = list(new_state.items())
        a_place = entries.index(("a", "1"))
        c_place = entries.index(("c", "3"))
        self.assertLessEqual(a_place, c_place)

    def test_tracestate_from_header(self):
        entries = [
            "1a-2f@foo=bar1",
            "1a-_*/2b@foo=bar2",
            "foo=bar3",
            "foo-_*/bar=bar4",
        ]
        header_list = [",".join(entries)]
        state = TraceState.from_header(header_list)
        self.assertEqual(state.to_header(), ",".join(entries))

    def test_tracestate_order_changed(self):
        entries = [
            "1a-2f@foo=bar1",
            "1a-_*/2b@foo=bar2",
            "foo=bar3",
            "foo-_*/bar=bar4",
        ]
        header_list = [",".join(entries)]
        state = TraceState.from_header(header_list)
        new_state = state.update("foo", "bar33")
        entries = list(new_state.items())  # type: ignore
        foo_place = entries.index(("foo", "bar33"))  # type: ignore
        prev_first_place = entries.index(("1a-2f@foo", "bar1"))  # type: ignore
        self.assertLessEqual(foo_place, prev_first_place)

    def test_trace_contains(self):
        entries = [
            "1a-2f@foo=bar1",
            "1a-_*/2b@foo=bar2",
            "foo=bar3",
            "foo-_*/bar=bar4",
        ]
        header_list = [",".join(entries)]
        state = TraceState.from_header(header_list)
        self.assertTrue("foo" in state)
        self.assertFalse("bar" in state)
        self.assertIsNone(state.get("bar"))
        with self.assertRaises(KeyError):
            state["bar"]  # pylint:disable=W0104
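# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test module above: TraceState is
# immutable, so add/update/delete each return a new instance, and update()
# moves the modified key to the front as required by the W3C tracestate
# specification. The vendor keys and values here are example data.
# ---------------------------------------------------------------------------
from opentelemetry.trace.span import TraceState

state = TraceState.from_header(["vendor1=abc,vendor2=def"])
state = state.update("vendor1", "xyz")  # re-inserted at the front
state = state.delete("vendor2")
assert state.to_header() == "vendor1=xyz"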
python-opentelemetry-1.39.1/opentelemetry-api/tests/util/test__importlib_metadata.py

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import TestCase

from opentelemetry.metrics import MeterProvider
from opentelemetry.util._importlib_metadata import (
    EntryPoint,
    EntryPoints,
    version,
)
from opentelemetry.util._importlib_metadata import (
    entry_points as importlib_metadata_entry_points,
)


class TestEntryPoints(TestCase):
    def test_entry_points(self):
        self.assertIsInstance(
            next(
                iter(
                    importlib_metadata_entry_points(
                        group="opentelemetry_meter_provider",
                        name="default_meter_provider",
                    )
                )
            ).load()(),
            MeterProvider,
        )

    def test_uniform_behavior(self):
        """
        Test that entry_points behaves the same regardless of the Python
        version.
        """

        entry_points = importlib_metadata_entry_points()
        self.assertIsInstance(entry_points, EntryPoints)

        entry_points = entry_points.select(group="opentelemetry_propagator")
        self.assertIsInstance(entry_points, EntryPoints)

        entry_points = entry_points.select(name="baggage")
        self.assertIsInstance(entry_points, EntryPoints)

        entry_point = next(iter(entry_points))
        self.assertIsInstance(entry_point, EntryPoint)
        self.assertEqual(entry_point.name, "baggage")
        self.assertEqual(entry_point.group, "opentelemetry_propagator")
        self.assertEqual(
            entry_point.value,
            "opentelemetry.baggage.propagation:W3CBaggagePropagator",
        )

        entry_points = importlib_metadata_entry_points(
            group="opentelemetry_propagator"
        )
        self.assertIsInstance(entry_points, EntryPoints)

        entry_points = entry_points.select(name="baggage")
        self.assertIsInstance(entry_points, EntryPoints)

        entry_point = next(iter(entry_points))
        self.assertIsInstance(entry_point, EntryPoint)
        self.assertEqual(entry_point.name, "baggage")
        self.assertEqual(entry_point.group, "opentelemetry_propagator")
        self.assertEqual(
            entry_point.value,
            "opentelemetry.baggage.propagation:W3CBaggagePropagator",
        )

        entry_points = importlib_metadata_entry_points(name="baggage")
        self.assertIsInstance(entry_points, EntryPoints)

        entry_point = next(iter(entry_points))
        self.assertIsInstance(entry_point, EntryPoint)
        self.assertEqual(entry_point.name, "baggage")
        self.assertEqual(entry_point.group, "opentelemetry_propagator")
        self.assertEqual(
            entry_point.value,
            "opentelemetry.baggage.propagation:W3CBaggagePropagator",
        )

        entry_points = importlib_metadata_entry_points(group="abc")
        self.assertIsInstance(entry_points, EntryPoints)
        self.assertEqual(len(entry_points), 0)

        entry_points = importlib_metadata_entry_points(
            group="opentelemetry_propagator", name="abc"
        )
        self.assertIsInstance(entry_points, EntryPoints)
        self.assertEqual(len(entry_points), 0)

        entry_points = importlib_metadata_entry_points(group="abc", name="abc")
        self.assertIsInstance(entry_points, EntryPoints)
        self.assertEqual(len(entry_points), 0)

        self.assertIsInstance(version("opentelemetry-api"), str)
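# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test module above: the same shim can
# be used to discover and instantiate a registered component, here the W3C
# baggage propagator the tests above assert on.
# ---------------------------------------------------------------------------
from opentelemetry.util._importlib_metadata import entry_points

(baggage_entry_point,) = entry_points(
    group="opentelemetry_propagator", name="baggage"
)
propagator = baggage_entry_point.load()()  # a W3CBaggagePropagator instance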
python-opentelemetry-1.39.1/opentelemetry-api/tests/util/test__providers.py

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from importlib import reload
from os import environ
from unittest import TestCase
from unittest.mock import Mock, patch

from opentelemetry.util import _providers


class Test_Providers(TestCase):
    # pylint: disable=invalid-name
    @patch.dict(
        environ,
        {  # type: ignore
            "provider_environment_variable": "mock_provider_environment_variable"
        },
    )
    @patch("opentelemetry.util._importlib_metadata.entry_points")
    def test__providers(self, mock_entry_points):
        reload(_providers)

        mock_entry_points.configure_mock(
            **{
                "side_effect": [
                    [
                        Mock(
                            **{
                                "load.return_value": Mock(
                                    **{"return_value": "a"}
                                )
                            }
                        ),
                    ],
                ]
            }
        )

        self.assertEqual(
            _providers._load_provider(  # pylint: disable=protected-access
                "provider_environment_variable", "provider"
            ),
            "a",
        )

python-opentelemetry-1.39.1/opentelemetry-api/tests/util/test_contextmanager.py

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import unittest
from typing import Callable, Iterator

from opentelemetry.util._decorator import _agnosticcontextmanager


@_agnosticcontextmanager
def cm() -> Iterator[int]:
    yield 3


@_agnosticcontextmanager
def cm_call_when_done(f: Callable[[], None]) -> Iterator[int]:
    yield 3
    f()


class TestContextManager(unittest.TestCase):
    def test_sync_with(self):
        with cm() as val:
            self.assertEqual(val, 3)

    def test_decorate_sync_func(self):
        @cm()
        def sync_func(a: str) -> str:
            return a + a

        res = sync_func("a")
        self.assertEqual(res, "aa")

    def test_decorate_async_func(self):
        # Test that a universal context manager decorating an async function
        # runs its cleanup code after the entire async function coroutine
        # finishes. This silently fails when using the normal @contextmanager
        # decorator, which runs its __exit__() after the un-started coroutine
        # is returned.
        #
        # To see this behavior, change cm_call_when_done() to
        # be decorated with @contextmanager.
        events = []

        @cm_call_when_done(lambda: events.append("cm_done"))
        async def async_func(a: str) -> str:
            events.append("start_async_func")
            await asyncio.sleep(0)
            events.append("finish_sleep")
            return a + a

        res = asyncio.run(async_func("a"))
        self.assertEqual(res, "aa")
        self.assertEqual(
            events, ["start_async_func", "finish_sleep", "cm_done"]
        )
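# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test module above: it reproduces the
# failure mode described in the comment inside test_decorate_async_func.
# With the stdlib @contextmanager, cleanup runs as soon as the (not yet
# started) coroutine object is returned, i.e. before the function body.
# ---------------------------------------------------------------------------
import asyncio
from contextlib import contextmanager
from typing import Iterator, List


@contextmanager
def _stdlib_cm(events: List[str]) -> Iterator[None]:
    yield
    events.append("cleanup")


_events: List[str] = []


@_stdlib_cm(_events)
async def _work() -> None:
    _events.append("body")


asyncio.run(_work())
# Cleanup was recorded before the body ran: the bug _agnosticcontextmanager
# is designed to avoid.
assert _events == ["cleanup", "body"]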
python-opentelemetry-1.39.1/opentelemetry-api/tests/util/test_once.py

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from opentelemetry.test.concurrency_test import ConcurrencyTestBase, MockFunc
from opentelemetry.util._once import Once


class TestOnce(ConcurrencyTestBase):
    def test_once_single_thread(self):
        once_func = MockFunc()
        once = Once()

        self.assertEqual(once_func.call_count, 0)

        # first call should run
        called = once.do_once(once_func)  # type: ignore[reportArgumentType]
        self.assertTrue(called)
        self.assertEqual(once_func.call_count, 1)

        # subsequent calls do nothing
        called = once.do_once(once_func)  # type: ignore[reportArgumentType]
        self.assertFalse(called)
        self.assertEqual(once_func.call_count, 1)

    def test_once_many_threads(self):
        once_func = MockFunc()
        once = Once()

        def run_concurrently() -> bool:
            return once.do_once(once_func)  # type: ignore[reportArgumentType]

        results = self.run_with_many_threads(run_concurrently, num_threads=100)

        self.assertEqual(once_func.call_count, 1)

        # check that only one of the threads got True
        self.assertEqual(results.count(True), 1)
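# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the test module above: Once is a primitive
# for thread-safe one-time initialization, which is exactly what the tests
# above verify. _configure() is a hypothetical callback for this example.
# ---------------------------------------------------------------------------
from opentelemetry.util._once import Once

_configure_once = Once()


def _configure() -> None:
    print("configured exactly once")


def ensure_configured() -> bool:
    # Only the single caller that actually ran _configure() gets True back.
    return _configure_once.do_once(_configure)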
python-opentelemetry-1.39.1/opentelemetry-api/tests/util/test_re.py

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# type: ignore

import unittest

from opentelemetry.util.re import parse_env_headers


class TestParseHeaders(unittest.TestCase):
    @staticmethod
    def _common_test_cases():
        return [
            # invalid header name
            ("=value", [], True),
            ("}key=value", [], True),
            ("@key()=value", [], True),
            ("/key=value", [], True),
            # invalid header value
            ("name=\\", [], True),
            ('name=value"', [], True),
            ("name=;value", [], True),
            # different header values
            ("name=", [("name", "")], False),
            ("name===value=", [("name", "==value=")], False),
            # url-encoded headers
            ("key=value%20with%20space", [("key", "value with space")], False),
            ("key%21=value", [("key!", "value")], False),
            ("%20key%20=%20value%20", [("key", "value")], False),
            # header name case normalization
            ("Key=Value", [("key", "Value")], False),
            # mix of valid and invalid headers
            (
                "name1=value1,invalidName, name2 = value2 , name3=value3==",
                [
                    (
                        "name1",
                        "value1",
                    ),
                    ("name2", "value2"),
                    ("name3", "value3=="),
                ],
                True,
            ),
            (
                "=name=valu3; key1; key2, content = application, red=\tvelvet; cake",
                [("content", "application")],
                True,
            ),
        ]

    def test_parse_env_headers(self):
        inp = self._common_test_cases() + [
            # invalid header value
            ("key=value othervalue", [], True),
        ]
        for case_ in inp:
            headers, expected, warn = case_
            with self.subTest(headers=headers):
                if warn:
                    with self.assertLogs(level="WARNING") as cm:
                        self.assertEqual(
                            parse_env_headers(headers), dict(expected)
                        )
                        self.assertTrue(
                            "Header format invalid! Header values in environment "
                            "variables must be URL encoded per the OpenTelemetry "
                            "Protocol Exporter specification:"
                            in cm.records[0].message,
                        )
                else:
                    self.assertEqual(
                        parse_env_headers(headers), dict(expected)
                    )

    def test_parse_env_headers_liberal(self):
        inp = self._common_test_cases() + [
            # valid header value
            ("key=value othervalue", [("key", "value othervalue")], False),
            (
                "key=value Other_Value==",
                [("key", "value Other_Value==")],
                False,
            ),
        ]
        for case_ in inp:
            headers, expected, warn = case_
            with self.subTest(headers=headers):
                if warn:
                    with self.assertLogs(level="WARNING") as cm:
                        self.assertEqual(
                            parse_env_headers(headers, liberal=True),
                            dict(expected),
                        )
                        self.assertTrue(
                            "Header format invalid! Header values in environment "
                            "variables must be URL encoded per the OpenTelemetry "
                            "Protocol Exporter specification or a comma separated "
                            "list of name=value occurrences:"
                            in cm.records[0].message,
                        )
                else:
                    self.assertEqual(
                        parse_env_headers(headers, liberal=True),
                        dict(expected),
                    )

python-opentelemetry-1.39.1/opentelemetry-proto/LICENSE

Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

python-opentelemetry-1.39.1/opentelemetry-proto/README.rst

OpenTelemetry Python Proto
==========================

|pypi|

.. |pypi| image:: https://badge.fury.io/py/opentelemetry-proto.svg
   :target: https://pypi.org/project/opentelemetry-proto/

This library contains the generated code for the OpenTelemetry protobuf data
model. The code in the current package was generated using the v1.7.0
release_ of opentelemetry-proto.

.. _release: https://github.com/open-telemetry/opentelemetry-proto/releases/tag/v1.7.0

Installation
------------

::

    pip install opentelemetry-proto

Code Generation
---------------

These files were generated automatically from code in opentelemetry-proto_.
To regenerate the code, run ``../scripts/proto_codegen.sh``.

To build against a new release or specific commit of opentelemetry-proto_,
update the ``PROTO_REPO_BRANCH_OR_COMMIT`` variable in
``../scripts/proto_codegen.sh``. Then run the script and commit the changes
as well as any fixes needed in the OTLP exporter.

.. _opentelemetry-proto: https://github.com/open-telemetry/opentelemetry-proto

References
----------

* `OpenTelemetry Project <https://opentelemetry.io/>`_
* `OpenTelemetry Proto <https://github.com/open-telemetry/opentelemetry-proto>`_
* `proto_codegen.sh script <https://github.com/open-telemetry/opentelemetry-python/blob/main/scripts/proto_codegen.sh>`_
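# ---------------------------------------------------------------------------
# Illustrative sketch, not covered by the README above: once installed, the
# generated messages can be built and round-tripped through the protobuf wire
# format. The empty ResourceLogs payload is a minimal example only.
# ---------------------------------------------------------------------------
from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import (
    ExportLogsServiceRequest,
)
from opentelemetry.proto.logs.v1.logs_pb2 import ResourceLogs

request = ExportLogsServiceRequest(resource_logs=[ResourceLogs()])
payload = request.SerializeToString()
assert ExportLogsServiceRequest.FromString(payload) == request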
python-opentelemetry-1.39.1/opentelemetry-proto/pyproject.toml

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "opentelemetry-proto"
dynamic = ["version"]
description = "OpenTelemetry Python Proto"
readme = "README.rst"
license = "Apache-2.0"
requires-python = ">=3.9"
authors = [
  { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
]
classifiers = [
  "Development Status :: 5 - Production/Stable",
  "Framework :: OpenTelemetry",
  "Intended Audience :: Developers",
  "Programming Language :: Python",
  "Programming Language :: Python :: 3",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Programming Language :: Python :: 3.12",
  "Programming Language :: Python :: 3.13",
]
dependencies = [
  "protobuf>=5.0, < 7.0",
]

[project.urls]
Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/opentelemetry-proto"
Repository = "https://github.com/open-telemetry/opentelemetry-python"

[tool.hatch.version]
path = "src/opentelemetry/proto/version/__init__.py"

[tool.hatch.build.targets.sdist]
include = [
  "/src",
  "/tests",
]

[tool.hatch.build.targets.wheel]
packages = ["src/opentelemetry"]

python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/__init__.py
python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/__init__.py
python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/logs/v1/logs_service_pb2.py

# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: opentelemetry/proto/collector/logs/v1/logs_service.proto # Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from opentelemetry.proto.logs.v1 import logs_pb2 as opentelemetry_dot_proto_dot_logs_dot_v1_dot_logs__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n8opentelemetry/proto/collector/logs/v1/logs_service.proto\x12%opentelemetry.proto.collector.logs.v1\x1a&opentelemetry/proto/logs/v1/logs.proto\"\\\n\x18\x45xportLogsServiceRequest\x12@\n\rresource_logs\x18\x01 \x03(\x0b\x32).opentelemetry.proto.logs.v1.ResourceLogs\"u\n\x19\x45xportLogsServiceResponse\x12X\n\x0fpartial_success\x18\x01 \x01(\x0b\x32?.opentelemetry.proto.collector.logs.v1.ExportLogsPartialSuccess\"O\n\x18\x45xportLogsPartialSuccess\x12\x1c\n\x14rejected_log_records\x18\x01 \x01(\x03\x12\x15\n\rerror_message\x18\x02 \x01(\t2\x9d\x01\n\x0bLogsService\x12\x8d\x01\n\x06\x45xport\x12?.opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest\x1a@.opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse\"\x00\x42\x98\x01\n(io.opentelemetry.proto.collector.logs.v1B\x10LogsServiceProtoP\x01Z0go.opentelemetry.io/proto/otlp/collector/logs/v1\xaa\x02%OpenTelemetry.Proto.Collector.Logs.V1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.proto.collector.logs.v1.logs_service_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n(io.opentelemetry.proto.collector.logs.v1B\020LogsServiceProtoP\001Z0go.opentelemetry.io/proto/otlp/collector/logs/v1\252\002%OpenTelemetry.Proto.Collector.Logs.V1' _globals['_EXPORTLOGSSERVICEREQUEST']._serialized_start=139 _globals['_EXPORTLOGSSERVICEREQUEST']._serialized_end=231 _globals['_EXPORTLOGSSERVICERESPONSE']._serialized_start=233 _globals['_EXPORTLOGSSERVICERESPONSE']._serialized_end=350 _globals['_EXPORTLOGSPARTIALSUCCESS']._serialized_start=352 _globals['_EXPORTLOGSPARTIALSUCCESS']._serialized_end=431 _globals['_LOGSSERVICE']._serialized_start=434 _globals['_LOGSSERVICE']._serialized_end=591 # @@protoc_insertion_point(module_scope) logs_service_pb2.pyi000066400000000000000000000115161511654350100360630ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/logs/v1""" @generated by mypy-protobuf. Do not edit manually! isort:skip_file Copyright 2020, OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import builtins import collections.abc import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.message import opentelemetry.proto.logs.v1.logs_pb2 import sys if sys.version_info >= (3, 8): import typing as typing_extensions else: import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor @typing_extensions.final class ExportLogsServiceRequest(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_LOGS_FIELD_NUMBER: builtins.int @property def resource_logs(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.logs.v1.logs_pb2.ResourceLogs]: """An array of ResourceLogs. For data coming from a single resource this array will typically contain one element. Intermediary nodes (such as OpenTelemetry Collector) that receive data from multiple origins typically batch the data before forwarding further and in that case this array will contain multiple elements. """ def __init__( self, *, resource_logs: collections.abc.Iterable[opentelemetry.proto.logs.v1.logs_pb2.ResourceLogs] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["resource_logs", b"resource_logs"]) -> None: ... global___ExportLogsServiceRequest = ExportLogsServiceRequest @typing_extensions.final class ExportLogsServiceResponse(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor PARTIAL_SUCCESS_FIELD_NUMBER: builtins.int @property def partial_success(self) -> global___ExportLogsPartialSuccess: """The details of a partially successful export request. If the request is only partially accepted (i.e. when the server accepts only parts of the data and rejects the rest) the server MUST initialize the `partial_success` field and MUST set the `rejected_` with the number of items it rejected. Servers MAY also make use of the `partial_success` field to convey warnings/suggestions to senders even when the request was fully accepted. In such cases, the `rejected_` MUST have a value of `0` and the `error_message` MUST be non-empty. A `partial_success` message with an empty value (rejected_ = 0 and `error_message` = "") is equivalent to it not being set/present. Senders SHOULD interpret it the same way as in the full success case. """ def __init__( self, *, partial_success: global___ExportLogsPartialSuccess | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["partial_success", b"partial_success"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["partial_success", b"partial_success"]) -> None: ... global___ExportLogsServiceResponse = ExportLogsServiceResponse @typing_extensions.final class ExportLogsPartialSuccess(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor REJECTED_LOG_RECORDS_FIELD_NUMBER: builtins.int ERROR_MESSAGE_FIELD_NUMBER: builtins.int rejected_log_records: builtins.int """The number of rejected log records. A `rejected_` field holding a `0` value indicates that the request was fully accepted. """ error_message: builtins.str """A developer-facing human-readable message in English. It should be used either to explain why the server rejected parts of the data during a partial success or to convey warnings/suggestions during a full success. The message should offer guidance on how users can address such issues. error_message is an optional field. An error_message with an empty value is equivalent to it not being set. 
""" def __init__( self, *, rejected_log_records: builtins.int = ..., error_message: builtins.str = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["error_message", b"error_message", "rejected_log_records", b"rejected_log_records"]) -> None: ... global___ExportLogsPartialSuccess = ExportLogsPartialSuccess logs_service_pb2_grpc.py000066400000000000000000000112521511654350100367220ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/logs/v1# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc import warnings from opentelemetry.proto.collector.logs.v1 import logs_service_pb2 as opentelemetry_dot_proto_dot_collector_dot_logs_dot_v1_dot_logs__service__pb2 GRPC_GENERATED_VERSION = '1.63.2' GRPC_VERSION = grpc.__version__ EXPECTED_ERROR_RELEASE = '1.65.0' SCHEDULED_RELEASE_DATE = 'June 25, 2024' _version_not_supported = False try: from grpc._utilities import first_version_is_lower _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) except ImportError: _version_not_supported = True if _version_not_supported: warnings.warn( f'The grpc package installed is at version {GRPC_VERSION},' + f' but the generated code in opentelemetry/proto/collector/logs/v1/logs_service_pb2_grpc.py depends on' + f' grpcio>={GRPC_GENERATED_VERSION}.' + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + f' This warning will become an error in {EXPECTED_ERROR_RELEASE},' + f' scheduled for release on {SCHEDULED_RELEASE_DATE}.', RuntimeWarning ) class LogsServiceStub(object): """Service that can be used to push logs between one Application instrumented with OpenTelemetry and an collector, or between an collector and a central collector (in this case logs are sent/received to/from multiple Applications). """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.Export = channel.unary_unary( '/opentelemetry.proto.collector.logs.v1.LogsService/Export', request_serializer=opentelemetry_dot_proto_dot_collector_dot_logs_dot_v1_dot_logs__service__pb2.ExportLogsServiceRequest.SerializeToString, response_deserializer=opentelemetry_dot_proto_dot_collector_dot_logs_dot_v1_dot_logs__service__pb2.ExportLogsServiceResponse.FromString, _registered_method=True) class LogsServiceServicer(object): """Service that can be used to push logs between one Application instrumented with OpenTelemetry and an collector, or between an collector and a central collector (in this case logs are sent/received to/from multiple Applications). 
""" def Export(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_LogsServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'Export': grpc.unary_unary_rpc_method_handler( servicer.Export, request_deserializer=opentelemetry_dot_proto_dot_collector_dot_logs_dot_v1_dot_logs__service__pb2.ExportLogsServiceRequest.FromString, response_serializer=opentelemetry_dot_proto_dot_collector_dot_logs_dot_v1_dot_logs__service__pb2.ExportLogsServiceResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'opentelemetry.proto.collector.logs.v1.LogsService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class LogsService(object): """Service that can be used to push logs between one Application instrumented with OpenTelemetry and an collector, or between an collector and a central collector (in this case logs are sent/received to/from multiple Applications). """ @staticmethod def Export(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary( request, target, '/opentelemetry.proto.collector.logs.v1.LogsService/Export', opentelemetry_dot_proto_dot_collector_dot_logs_dot_v1_dot_logs__service__pb2.ExportLogsServiceRequest.SerializeToString, opentelemetry_dot_proto_dot_collector_dot_logs_dot_v1_dot_logs__service__pb2.ExportLogsServiceResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, _registered_method=True) python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/metrics/000077500000000000000000000000001511654350100323405ustar00rootroot00000000000000__init__.py000066400000000000000000000000001511654350100343600ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/metricspython-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/metrics/v1/000077500000000000000000000000001511654350100326665ustar00rootroot00000000000000__init__.py000066400000000000000000000000001511654350100347060ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/metrics/v1metrics_service_pb2.py000066400000000000000000000053411511654350100371150ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/metrics/v1# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto # Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from opentelemetry.proto.metrics.v1 import metrics_pb2 as opentelemetry_dot_proto_dot_metrics_dot_v1_dot_metrics__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n>opentelemetry/proto/collector/metrics/v1/metrics_service.proto\x12(opentelemetry.proto.collector.metrics.v1\x1a,opentelemetry/proto/metrics/v1/metrics.proto\"h\n\x1b\x45xportMetricsServiceRequest\x12I\n\x10resource_metrics\x18\x01 \x03(\x0b\x32/.opentelemetry.proto.metrics.v1.ResourceMetrics\"~\n\x1c\x45xportMetricsServiceResponse\x12^\n\x0fpartial_success\x18\x01 \x01(\x0b\x32\x45.opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess\"R\n\x1b\x45xportMetricsPartialSuccess\x12\x1c\n\x14rejected_data_points\x18\x01 \x01(\x03\x12\x15\n\rerror_message\x18\x02 \x01(\t2\xac\x01\n\x0eMetricsService\x12\x99\x01\n\x06\x45xport\x12\x45.opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest\x1a\x46.opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse\"\x00\x42\xa4\x01\n+io.opentelemetry.proto.collector.metrics.v1B\x13MetricsServiceProtoP\x01Z3go.opentelemetry.io/proto/otlp/collector/metrics/v1\xaa\x02(OpenTelemetry.Proto.Collector.Metrics.V1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.proto.collector.metrics.v1.metrics_service_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n+io.opentelemetry.proto.collector.metrics.v1B\023MetricsServiceProtoP\001Z3go.opentelemetry.io/proto/otlp/collector/metrics/v1\252\002(OpenTelemetry.Proto.Collector.Metrics.V1' _globals['_EXPORTMETRICSSERVICEREQUEST']._serialized_start=154 _globals['_EXPORTMETRICSSERVICEREQUEST']._serialized_end=258 _globals['_EXPORTMETRICSSERVICERESPONSE']._serialized_start=260 _globals['_EXPORTMETRICSSERVICERESPONSE']._serialized_end=386 _globals['_EXPORTMETRICSPARTIALSUCCESS']._serialized_start=388 _globals['_EXPORTMETRICSPARTIALSUCCESS']._serialized_end=470 _globals['_METRICSSERVICE']._serialized_start=473 _globals['_METRICSSERVICE']._serialized_end=645 # @@protoc_insertion_point(module_scope) metrics_service_pb2.pyi000066400000000000000000000116311511654350100372650ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/metrics/v1""" @generated by mypy-protobuf. Do not edit manually! isort:skip_file Copyright 2019, OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import builtins import collections.abc import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.message import opentelemetry.proto.metrics.v1.metrics_pb2 import sys if sys.version_info >= (3, 8): import typing as typing_extensions else: import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor @typing_extensions.final class ExportMetricsServiceRequest(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_METRICS_FIELD_NUMBER: builtins.int @property def resource_metrics(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.metrics.v1.metrics_pb2.ResourceMetrics]: """An array of ResourceMetrics. For data coming from a single resource this array will typically contain one element. Intermediary nodes (such as OpenTelemetry Collector) that receive data from multiple origins typically batch the data before forwarding further and in that case this array will contain multiple elements. """ def __init__( self, *, resource_metrics: collections.abc.Iterable[opentelemetry.proto.metrics.v1.metrics_pb2.ResourceMetrics] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["resource_metrics", b"resource_metrics"]) -> None: ... global___ExportMetricsServiceRequest = ExportMetricsServiceRequest @typing_extensions.final class ExportMetricsServiceResponse(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor PARTIAL_SUCCESS_FIELD_NUMBER: builtins.int @property def partial_success(self) -> global___ExportMetricsPartialSuccess: """The details of a partially successful export request. If the request is only partially accepted (i.e. when the server accepts only parts of the data and rejects the rest) the server MUST initialize the `partial_success` field and MUST set the `rejected_` with the number of items it rejected. Servers MAY also make use of the `partial_success` field to convey warnings/suggestions to senders even when the request was fully accepted. In such cases, the `rejected_` MUST have a value of `0` and the `error_message` MUST be non-empty. A `partial_success` message with an empty value (rejected_ = 0 and `error_message` = "") is equivalent to it not being set/present. Senders SHOULD interpret it the same way as in the full success case. """ def __init__( self, *, partial_success: global___ExportMetricsPartialSuccess | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["partial_success", b"partial_success"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["partial_success", b"partial_success"]) -> None: ... global___ExportMetricsServiceResponse = ExportMetricsServiceResponse @typing_extensions.final class ExportMetricsPartialSuccess(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor REJECTED_DATA_POINTS_FIELD_NUMBER: builtins.int ERROR_MESSAGE_FIELD_NUMBER: builtins.int rejected_data_points: builtins.int """The number of rejected data points. A `rejected_` field holding a `0` value indicates that the request was fully accepted. """ error_message: builtins.str """A developer-facing human-readable message in English. It should be used either to explain why the server rejected parts of the data during a partial success or to convey warnings/suggestions during a full success. The message should offer guidance on how users can address such issues. error_message is an optional field. 
An error_message with an empty value is equivalent to it not being set. """ def __init__( self, *, rejected_data_points: builtins.int = ..., error_message: builtins.str = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["error_message", b"error_message", "rejected_data_points", b"rejected_data_points"]) -> None: ... global___ExportMetricsPartialSuccess = ExportMetricsPartialSuccess metrics_service_pb2_grpc.py000066400000000000000000000111071511654350100401250ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/metrics/v1# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc import warnings from opentelemetry.proto.collector.metrics.v1 import metrics_service_pb2 as opentelemetry_dot_proto_dot_collector_dot_metrics_dot_v1_dot_metrics__service__pb2 GRPC_GENERATED_VERSION = '1.63.2' GRPC_VERSION = grpc.__version__ EXPECTED_ERROR_RELEASE = '1.65.0' SCHEDULED_RELEASE_DATE = 'June 25, 2024' _version_not_supported = False try: from grpc._utilities import first_version_is_lower _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) except ImportError: _version_not_supported = True if _version_not_supported: warnings.warn( f'The grpc package installed is at version {GRPC_VERSION},' + f' but the generated code in opentelemetry/proto/collector/metrics/v1/metrics_service_pb2_grpc.py depends on' + f' grpcio>={GRPC_GENERATED_VERSION}.' + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + f' This warning will become an error in {EXPECTED_ERROR_RELEASE},' + f' scheduled for release on {SCHEDULED_RELEASE_DATE}.', RuntimeWarning ) class MetricsServiceStub(object): """Service that can be used to push metrics between one Application instrumented with OpenTelemetry and a collector, or between a collector and a central collector. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.Export = channel.unary_unary( '/opentelemetry.proto.collector.metrics.v1.MetricsService/Export', request_serializer=opentelemetry_dot_proto_dot_collector_dot_metrics_dot_v1_dot_metrics__service__pb2.ExportMetricsServiceRequest.SerializeToString, response_deserializer=opentelemetry_dot_proto_dot_collector_dot_metrics_dot_v1_dot_metrics__service__pb2.ExportMetricsServiceResponse.FromString, _registered_method=True) class MetricsServiceServicer(object): """Service that can be used to push metrics between one Application instrumented with OpenTelemetry and a collector, or between a collector and a central collector. 
""" def Export(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_MetricsServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'Export': grpc.unary_unary_rpc_method_handler( servicer.Export, request_deserializer=opentelemetry_dot_proto_dot_collector_dot_metrics_dot_v1_dot_metrics__service__pb2.ExportMetricsServiceRequest.FromString, response_serializer=opentelemetry_dot_proto_dot_collector_dot_metrics_dot_v1_dot_metrics__service__pb2.ExportMetricsServiceResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'opentelemetry.proto.collector.metrics.v1.MetricsService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class MetricsService(object): """Service that can be used to push metrics between one Application instrumented with OpenTelemetry and a collector, or between a collector and a central collector. """ @staticmethod def Export(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary( request, target, '/opentelemetry.proto.collector.metrics.v1.MetricsService/Export', opentelemetry_dot_proto_dot_collector_dot_metrics_dot_v1_dot_metrics__service__pb2.ExportMetricsServiceRequest.SerializeToString, opentelemetry_dot_proto_dot_collector_dot_metrics_dot_v1_dot_metrics__service__pb2.ExportMetricsServiceResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, _registered_method=True) python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/profiles/000077500000000000000000000000001511654350100325155ustar00rootroot00000000000000v1development/000077500000000000000000000000001511654350100352275ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/profilesprofiles_service_pb2.py000066400000000000000000000060711511654350100417130ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/profiles/v1development# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: opentelemetry/proto/collector/profiles/v1development/profiles_service.proto # Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from opentelemetry.proto.profiles.v1development import profiles_pb2 as opentelemetry_dot_proto_dot_profiles_dot_v1development_dot_profiles__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nKopentelemetry/proto/collector/profiles/v1development/profiles_service.proto\x12\x34opentelemetry.proto.collector.profiles.v1development\x1a\x39opentelemetry/proto/profiles/v1development/profiles.proto\"\xcb\x01\n\x1c\x45xportProfilesServiceRequest\x12W\n\x11resource_profiles\x18\x01 \x03(\x0b\x32<.opentelemetry.proto.profiles.v1development.ResourceProfiles\x12R\n\ndictionary\x18\x02 \x01(\x0b\x32>.opentelemetry.proto.profiles.v1development.ProfilesDictionary\"\x8c\x01\n\x1d\x45xportProfilesServiceResponse\x12k\n\x0fpartial_success\x18\x01 \x01(\x0b\x32R.opentelemetry.proto.collector.profiles.v1development.ExportProfilesPartialSuccess\"P\n\x1c\x45xportProfilesPartialSuccess\x12\x19\n\x11rejected_profiles\x18\x01 \x01(\x03\x12\x15\n\rerror_message\x18\x02 \x01(\t2\xc7\x01\n\x0fProfilesService\x12\xb3\x01\n\x06\x45xport\x12R.opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceRequest\x1aS.opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceResponse\"\x00\x42\xc9\x01\n7io.opentelemetry.proto.collector.profiles.v1developmentB\x14ProfilesServiceProtoP\x01Z?go.opentelemetry.io/proto/otlp/collector/profiles/v1development\xaa\x02\x34OpenTelemetry.Proto.Collector.Profiles.V1Developmentb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.proto.collector.profiles.v1development.profiles_service_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n7io.opentelemetry.proto.collector.profiles.v1developmentB\024ProfilesServiceProtoP\001Z?go.opentelemetry.io/proto/otlp/collector/profiles/v1development\252\0024OpenTelemetry.Proto.Collector.Profiles.V1Development' _globals['_EXPORTPROFILESSERVICEREQUEST']._serialized_start=193 _globals['_EXPORTPROFILESSERVICEREQUEST']._serialized_end=396 _globals['_EXPORTPROFILESSERVICERESPONSE']._serialized_start=399 _globals['_EXPORTPROFILESSERVICERESPONSE']._serialized_end=539 _globals['_EXPORTPROFILESPARTIALSUCCESS']._serialized_start=541 _globals['_EXPORTPROFILESPARTIALSUCCESS']._serialized_end=621 _globals['_PROFILESSERVICE']._serialized_start=624 _globals['_PROFILESSERVICE']._serialized_end=823 # @@protoc_insertion_point(module_scope) profiles_service_pb2.pyi000066400000000000000000000127041511654350100420640ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/profiles/v1development""" @generated by mypy-protobuf. Do not edit manually! isort:skip_file Copyright 2023, OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import builtins import collections.abc import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.message import opentelemetry.proto.profiles.v1development.profiles_pb2 import sys if sys.version_info >= (3, 8): import typing as typing_extensions else: import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor @typing_extensions.final class ExportProfilesServiceRequest(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_PROFILES_FIELD_NUMBER: builtins.int DICTIONARY_FIELD_NUMBER: builtins.int @property def resource_profiles(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.profiles.v1development.profiles_pb2.ResourceProfiles]: """An array of ResourceProfiles. For data coming from a single resource this array will typically contain one element. Intermediary nodes (such as OpenTelemetry Collector) that receive data from multiple origins typically batch the data before forwarding further and in that case this array will contain multiple elements. """ @property def dictionary(self) -> opentelemetry.proto.profiles.v1development.profiles_pb2.ProfilesDictionary: """The reference table containing all data shared by profiles across the message being sent.""" def __init__( self, *, resource_profiles: collections.abc.Iterable[opentelemetry.proto.profiles.v1development.profiles_pb2.ResourceProfiles] | None = ..., dictionary: opentelemetry.proto.profiles.v1development.profiles_pb2.ProfilesDictionary | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["dictionary", b"dictionary"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["dictionary", b"dictionary", "resource_profiles", b"resource_profiles"]) -> None: ... global___ExportProfilesServiceRequest = ExportProfilesServiceRequest @typing_extensions.final class ExportProfilesServiceResponse(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor PARTIAL_SUCCESS_FIELD_NUMBER: builtins.int @property def partial_success(self) -> global___ExportProfilesPartialSuccess: """The details of a partially successful export request. If the request is only partially accepted (i.e. when the server accepts only parts of the data and rejects the rest) the server MUST initialize the `partial_success` field and MUST set the `rejected_` with the number of items it rejected. Servers MAY also make use of the `partial_success` field to convey warnings/suggestions to senders even when the request was fully accepted. In such cases, the `rejected_` MUST have a value of `0` and the `error_message` MUST be non-empty. A `partial_success` message with an empty value (rejected_ = 0 and `error_message` = "") is equivalent to it not being set/present. Senders SHOULD interpret it the same way as in the full success case. """ def __init__( self, *, partial_success: global___ExportProfilesPartialSuccess | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["partial_success", b"partial_success"]) -> builtins.bool: ... 
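    # A minimal sketch of how a server would populate this message to signal
    # partial success, per the field comments above (the concrete classes come
    # from profiles_service_pb2 at runtime; the counts and message text are
    # illustrative):
    #
    #     response = ExportProfilesServiceResponse(
    #         partial_success=ExportProfilesPartialSuccess(
    #             rejected_profiles=3,
    #             error_message="3 profiles rejected: unsupported sample type",
    #         )
    #     )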
def ClearField(self, field_name: typing_extensions.Literal["partial_success", b"partial_success"]) -> None: ... global___ExportProfilesServiceResponse = ExportProfilesServiceResponse @typing_extensions.final class ExportProfilesPartialSuccess(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor REJECTED_PROFILES_FIELD_NUMBER: builtins.int ERROR_MESSAGE_FIELD_NUMBER: builtins.int rejected_profiles: builtins.int """The number of rejected profiles. A `rejected_` field holding a `0` value indicates that the request was fully accepted. """ error_message: builtins.str """A developer-facing human-readable message in English. It should be used either to explain why the server rejected parts of the data during a partial success or to convey warnings/suggestions during a full success. The message should offer guidance on how users can address such issues. error_message is an optional field. An error_message with an empty value is equivalent to it not being set. """ def __init__( self, *, rejected_profiles: builtins.int = ..., error_message: builtins.str = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["error_message", b"error_message", "rejected_profiles", b"rejected_profiles"]) -> None: ... global___ExportProfilesPartialSuccess = ExportProfilesPartialSuccess profiles_service_pb2_grpc.py000066400000000000000000000113441511654350100427250ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/profiles/v1development# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc import warnings from opentelemetry.proto.collector.profiles.v1development import profiles_service_pb2 as opentelemetry_dot_proto_dot_collector_dot_profiles_dot_v1development_dot_profiles__service__pb2 GRPC_GENERATED_VERSION = '1.63.2' GRPC_VERSION = grpc.__version__ EXPECTED_ERROR_RELEASE = '1.65.0' SCHEDULED_RELEASE_DATE = 'June 25, 2024' _version_not_supported = False try: from grpc._utilities import first_version_is_lower _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) except ImportError: _version_not_supported = True if _version_not_supported: warnings.warn( f'The grpc package installed is at version {GRPC_VERSION},' + f' but the generated code in opentelemetry/proto/collector/profiles/v1development/profiles_service_pb2_grpc.py depends on' + f' grpcio>={GRPC_GENERATED_VERSION}.' + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + f' This warning will become an error in {EXPECTED_ERROR_RELEASE},' + f' scheduled for release on {SCHEDULED_RELEASE_DATE}.', RuntimeWarning ) class ProfilesServiceStub(object): """Service that can be used to push profiles between one Application instrumented with OpenTelemetry and a collector, or between a collector and a central collector. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. 
""" self.Export = channel.unary_unary( '/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export', request_serializer=opentelemetry_dot_proto_dot_collector_dot_profiles_dot_v1development_dot_profiles__service__pb2.ExportProfilesServiceRequest.SerializeToString, response_deserializer=opentelemetry_dot_proto_dot_collector_dot_profiles_dot_v1development_dot_profiles__service__pb2.ExportProfilesServiceResponse.FromString, _registered_method=True) class ProfilesServiceServicer(object): """Service that can be used to push profiles between one Application instrumented with OpenTelemetry and a collector, or between a collector and a central collector. """ def Export(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ProfilesServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'Export': grpc.unary_unary_rpc_method_handler( servicer.Export, request_deserializer=opentelemetry_dot_proto_dot_collector_dot_profiles_dot_v1development_dot_profiles__service__pb2.ExportProfilesServiceRequest.FromString, response_serializer=opentelemetry_dot_proto_dot_collector_dot_profiles_dot_v1development_dot_profiles__service__pb2.ExportProfilesServiceResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'opentelemetry.proto.collector.profiles.v1development.ProfilesService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class ProfilesService(object): """Service that can be used to push profiles between one Application instrumented with OpenTelemetry and a collector, or between a collector and a central collector. """ @staticmethod def Export(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary( request, target, '/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export', opentelemetry_dot_proto_dot_collector_dot_profiles_dot_v1development_dot_profiles__service__pb2.ExportProfilesServiceRequest.SerializeToString, opentelemetry_dot_proto_dot_collector_dot_profiles_dot_v1development_dot_profiles__service__pb2.ExportProfilesServiceResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, _registered_method=True) python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/trace/000077500000000000000000000000001511654350100317705ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/trace/__init__.py000066400000000000000000000000001511654350100340670ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/trace/v1/000077500000000000000000000000001511654350100323165ustar00rootroot00000000000000__init__.py000066400000000000000000000000001511654350100343360ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/trace/v1trace_service_pb2.py000066400000000000000000000052121511654350100361720ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/trace/v1# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: opentelemetry/proto/collector/trace/v1/trace_service.proto # Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from opentelemetry.proto.trace.v1 import trace_pb2 as opentelemetry_dot_proto_dot_trace_dot_v1_dot_trace__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n:opentelemetry/proto/collector/trace/v1/trace_service.proto\x12&opentelemetry.proto.collector.trace.v1\x1a(opentelemetry/proto/trace/v1/trace.proto\"`\n\x19\x45xportTraceServiceRequest\x12\x43\n\x0eresource_spans\x18\x01 \x03(\x0b\x32+.opentelemetry.proto.trace.v1.ResourceSpans\"x\n\x1a\x45xportTraceServiceResponse\x12Z\n\x0fpartial_success\x18\x01 \x01(\x0b\x32\x41.opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess\"J\n\x19\x45xportTracePartialSuccess\x12\x16\n\x0erejected_spans\x18\x01 \x01(\x03\x12\x15\n\rerror_message\x18\x02 \x01(\t2\xa2\x01\n\x0cTraceService\x12\x91\x01\n\x06\x45xport\x12\x41.opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest\x1a\x42.opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse\"\x00\x42\x9c\x01\n)io.opentelemetry.proto.collector.trace.v1B\x11TraceServiceProtoP\x01Z1go.opentelemetry.io/proto/otlp/collector/trace/v1\xaa\x02&OpenTelemetry.Proto.Collector.Trace.V1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.proto.collector.trace.v1.trace_service_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n)io.opentelemetry.proto.collector.trace.v1B\021TraceServiceProtoP\001Z1go.opentelemetry.io/proto/otlp/collector/trace/v1\252\002&OpenTelemetry.Proto.Collector.Trace.V1' _globals['_EXPORTTRACESERVICEREQUEST']._serialized_start=144 _globals['_EXPORTTRACESERVICEREQUEST']._serialized_end=240 _globals['_EXPORTTRACESERVICERESPONSE']._serialized_start=242 _globals['_EXPORTTRACESERVICERESPONSE']._serialized_end=362 _globals['_EXPORTTRACEPARTIALSUCCESS']._serialized_start=364 _globals['_EXPORTTRACEPARTIALSUCCESS']._serialized_end=438 _globals['_TRACESERVICE']._serialized_start=441 _globals['_TRACESERVICE']._serialized_end=603 # @@protoc_insertion_point(module_scope) trace_service_pb2.pyi000066400000000000000000000115031511654350100363430ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/trace/v1""" @generated by mypy-protobuf. Do not edit manually! isort:skip_file Copyright 2019, OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import builtins import collections.abc import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.message import opentelemetry.proto.trace.v1.trace_pb2 import sys if sys.version_info >= (3, 8): import typing as typing_extensions else: import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor @typing_extensions.final class ExportTraceServiceRequest(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_SPANS_FIELD_NUMBER: builtins.int @property def resource_spans(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.trace.v1.trace_pb2.ResourceSpans]: """An array of ResourceSpans. For data coming from a single resource this array will typically contain one element. Intermediary nodes (such as OpenTelemetry Collector) that receive data from multiple origins typically batch the data before forwarding further and in that case this array will contain multiple elements. """ def __init__( self, *, resource_spans: collections.abc.Iterable[opentelemetry.proto.trace.v1.trace_pb2.ResourceSpans] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["resource_spans", b"resource_spans"]) -> None: ... global___ExportTraceServiceRequest = ExportTraceServiceRequest @typing_extensions.final class ExportTraceServiceResponse(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor PARTIAL_SUCCESS_FIELD_NUMBER: builtins.int @property def partial_success(self) -> global___ExportTracePartialSuccess: """The details of a partially successful export request. If the request is only partially accepted (i.e. when the server accepts only parts of the data and rejects the rest) the server MUST initialize the `partial_success` field and MUST set the `rejected_` with the number of items it rejected. Servers MAY also make use of the `partial_success` field to convey warnings/suggestions to senders even when the request was fully accepted. In such cases, the `rejected_` MUST have a value of `0` and the `error_message` MUST be non-empty. A `partial_success` message with an empty value (rejected_ = 0 and `error_message` = "") is equivalent to it not being set/present. Senders SHOULD interpret it the same way as in the full success case. """ def __init__( self, *, partial_success: global___ExportTracePartialSuccess | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["partial_success", b"partial_success"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["partial_success", b"partial_success"]) -> None: ... global___ExportTraceServiceResponse = ExportTraceServiceResponse @typing_extensions.final class ExportTracePartialSuccess(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor REJECTED_SPANS_FIELD_NUMBER: builtins.int ERROR_MESSAGE_FIELD_NUMBER: builtins.int rejected_spans: builtins.int """The number of rejected spans. A `rejected_` field holding a `0` value indicates that the request was fully accepted. """ error_message: builtins.str """A developer-facing human-readable message in English. It should be used either to explain why the server rejected parts of the data during a partial success or to convey warnings/suggestions during a full success. The message should offer guidance on how users can address such issues. error_message is an optional field. An error_message with an empty value is equivalent to it not being set. 
""" def __init__( self, *, rejected_spans: builtins.int = ..., error_message: builtins.str = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["error_message", b"error_message", "rejected_spans", b"rejected_spans"]) -> None: ... global___ExportTracePartialSuccess = ExportTracePartialSuccess trace_service_pb2_grpc.py000066400000000000000000000113141511654350100372050ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/collector/trace/v1# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc import warnings from opentelemetry.proto.collector.trace.v1 import trace_service_pb2 as opentelemetry_dot_proto_dot_collector_dot_trace_dot_v1_dot_trace__service__pb2 GRPC_GENERATED_VERSION = '1.63.2' GRPC_VERSION = grpc.__version__ EXPECTED_ERROR_RELEASE = '1.65.0' SCHEDULED_RELEASE_DATE = 'June 25, 2024' _version_not_supported = False try: from grpc._utilities import first_version_is_lower _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) except ImportError: _version_not_supported = True if _version_not_supported: warnings.warn( f'The grpc package installed is at version {GRPC_VERSION},' + f' but the generated code in opentelemetry/proto/collector/trace/v1/trace_service_pb2_grpc.py depends on' + f' grpcio>={GRPC_GENERATED_VERSION}.' + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + f' This warning will become an error in {EXPECTED_ERROR_RELEASE},' + f' scheduled for release on {SCHEDULED_RELEASE_DATE}.', RuntimeWarning ) class TraceServiceStub(object): """Service that can be used to push spans between one Application instrumented with OpenTelemetry and a collector, or between a collector and a central collector (in this case spans are sent/received to/from multiple Applications). """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.Export = channel.unary_unary( '/opentelemetry.proto.collector.trace.v1.TraceService/Export', request_serializer=opentelemetry_dot_proto_dot_collector_dot_trace_dot_v1_dot_trace__service__pb2.ExportTraceServiceRequest.SerializeToString, response_deserializer=opentelemetry_dot_proto_dot_collector_dot_trace_dot_v1_dot_trace__service__pb2.ExportTraceServiceResponse.FromString, _registered_method=True) class TraceServiceServicer(object): """Service that can be used to push spans between one Application instrumented with OpenTelemetry and a collector, or between a collector and a central collector (in this case spans are sent/received to/from multiple Applications). 
""" def Export(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_TraceServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'Export': grpc.unary_unary_rpc_method_handler( servicer.Export, request_deserializer=opentelemetry_dot_proto_dot_collector_dot_trace_dot_v1_dot_trace__service__pb2.ExportTraceServiceRequest.FromString, response_serializer=opentelemetry_dot_proto_dot_collector_dot_trace_dot_v1_dot_trace__service__pb2.ExportTraceServiceResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'opentelemetry.proto.collector.trace.v1.TraceService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class TraceService(object): """Service that can be used to push spans between one Application instrumented with OpenTelemetry and a collector, or between a collector and a central collector (in this case spans are sent/received to/from multiple Applications). """ @staticmethod def Export(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary( request, target, '/opentelemetry.proto.collector.trace.v1.TraceService/Export', opentelemetry_dot_proto_dot_collector_dot_trace_dot_v1_dot_trace__service__pb2.ExportTraceServiceRequest.SerializeToString, opentelemetry_dot_proto_dot_collector_dot_trace_dot_v1_dot_trace__service__pb2.ExportTraceServiceResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, _registered_method=True) python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/common/000077500000000000000000000000001511654350100301745ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/common/__init__.py000066400000000000000000000000001511654350100322730ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/common/v1/000077500000000000000000000000001511654350100305225ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/common/v1/__init__.py000066400000000000000000000000001511654350100326210ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/common/v1/common_pb2.py000066400000000000000000000061061511654350100331320ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: opentelemetry/proto/common/v1/common.proto # Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n*opentelemetry/proto/common/v1/common.proto\x12\x1dopentelemetry.proto.common.v1\"\x8c\x02\n\x08\x41nyValue\x12\x16\n\x0cstring_value\x18\x01 \x01(\tH\x00\x12\x14\n\nbool_value\x18\x02 \x01(\x08H\x00\x12\x13\n\tint_value\x18\x03 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x04 \x01(\x01H\x00\x12@\n\x0b\x61rray_value\x18\x05 \x01(\x0b\x32).opentelemetry.proto.common.v1.ArrayValueH\x00\x12\x43\n\x0ckvlist_value\x18\x06 \x01(\x0b\x32+.opentelemetry.proto.common.v1.KeyValueListH\x00\x12\x15\n\x0b\x62ytes_value\x18\x07 \x01(\x0cH\x00\x42\x07\n\x05value\"E\n\nArrayValue\x12\x37\n\x06values\x18\x01 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.AnyValue\"G\n\x0cKeyValueList\x12\x37\n\x06values\x18\x01 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\"O\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.opentelemetry.proto.common.v1.AnyValue\"\x94\x01\n\x14InstrumentationScope\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12;\n\nattributes\x18\x03 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12 \n\x18\x64ropped_attributes_count\x18\x04 \x01(\r\"X\n\tEntityRef\x12\x12\n\nschema_url\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0f\n\x07id_keys\x18\x03 \x03(\t\x12\x18\n\x10\x64\x65scription_keys\x18\x04 \x03(\tB{\n io.opentelemetry.proto.common.v1B\x0b\x43ommonProtoP\x01Z(go.opentelemetry.io/proto/otlp/common/v1\xaa\x02\x1dOpenTelemetry.Proto.Common.V1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.proto.common.v1.common_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n io.opentelemetry.proto.common.v1B\013CommonProtoP\001Z(go.opentelemetry.io/proto/otlp/common/v1\252\002\035OpenTelemetry.Proto.Common.V1' _globals['_ANYVALUE']._serialized_start=78 _globals['_ANYVALUE']._serialized_end=346 _globals['_ARRAYVALUE']._serialized_start=348 _globals['_ARRAYVALUE']._serialized_end=417 _globals['_KEYVALUELIST']._serialized_start=419 _globals['_KEYVALUELIST']._serialized_end=490 _globals['_KEYVALUE']._serialized_start=492 _globals['_KEYVALUE']._serialized_end=571 _globals['_INSTRUMENTATIONSCOPE']._serialized_start=574 _globals['_INSTRUMENTATIONSCOPE']._serialized_end=722 _globals['_ENTITYREF']._serialized_start=724 _globals['_ENTITYREF']._serialized_end=812 # @@protoc_insertion_point(module_scope) python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/common/v1/common_pb2.pyi000066400000000000000000000234631511654350100333100ustar00rootroot00000000000000""" @generated by mypy-protobuf. Do not edit manually! isort:skip_file Copyright 2019, OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import builtins import collections.abc import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.message import sys if sys.version_info >= (3, 8): import typing as typing_extensions else: import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor @typing_extensions.final class AnyValue(google.protobuf.message.Message): """AnyValue is used to represent any type of attribute value. AnyValue may contain a primitive value such as a string or integer or it may contain an arbitrary nested object containing arrays, key-value lists and primitives. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor STRING_VALUE_FIELD_NUMBER: builtins.int BOOL_VALUE_FIELD_NUMBER: builtins.int INT_VALUE_FIELD_NUMBER: builtins.int DOUBLE_VALUE_FIELD_NUMBER: builtins.int ARRAY_VALUE_FIELD_NUMBER: builtins.int KVLIST_VALUE_FIELD_NUMBER: builtins.int BYTES_VALUE_FIELD_NUMBER: builtins.int string_value: builtins.str bool_value: builtins.bool int_value: builtins.int double_value: builtins.float @property def array_value(self) -> global___ArrayValue: ... @property def kvlist_value(self) -> global___KeyValueList: ... bytes_value: builtins.bytes def __init__( self, *, string_value: builtins.str = ..., bool_value: builtins.bool = ..., int_value: builtins.int = ..., double_value: builtins.float = ..., array_value: global___ArrayValue | None = ..., kvlist_value: global___KeyValueList | None = ..., bytes_value: builtins.bytes = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["array_value", b"array_value", "bool_value", b"bool_value", "bytes_value", b"bytes_value", "double_value", b"double_value", "int_value", b"int_value", "kvlist_value", b"kvlist_value", "string_value", b"string_value", "value", b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["array_value", b"array_value", "bool_value", b"bool_value", "bytes_value", b"bytes_value", "double_value", b"double_value", "int_value", b"int_value", "kvlist_value", b"kvlist_value", "string_value", b"string_value", "value", b"value"]) -> None: ... def WhichOneof(self, oneof_group: typing_extensions.Literal["value", b"value"]) -> typing_extensions.Literal["string_value", "bool_value", "int_value", "double_value", "array_value", "kvlist_value", "bytes_value"] | None: ... global___AnyValue = AnyValue @typing_extensions.final class ArrayValue(google.protobuf.message.Message): """ArrayValue is a list of AnyValue messages. We need ArrayValue as a message since oneof in AnyValue does not allow repeated fields. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor VALUES_FIELD_NUMBER: builtins.int @property def values(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___AnyValue]: """Array of values. The array may be empty (contain 0 elements).""" def __init__( self, *, values: collections.abc.Iterable[global___AnyValue] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["values", b"values"]) -> None: ... 
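# A minimal sketch of building these messages at runtime (this stub file only
# declares the types; the concrete classes live in common_pb2, and the values
# are illustrative):
#
#     from opentelemetry.proto.common.v1 import common_pb2
#
#     value = common_pb2.AnyValue(
#         array_value=common_pb2.ArrayValue(
#             values=[
#                 common_pb2.AnyValue(string_value="a"),
#                 common_pb2.AnyValue(int_value=1),
#             ]
#         )
#     )
#     assert value.WhichOneof("value") == "array_value"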
global___ArrayValue = ArrayValue @typing_extensions.final class KeyValueList(google.protobuf.message.Message): """KeyValueList is a list of KeyValue messages. We need KeyValueList as a message since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches are semantically equivalent. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor VALUES_FIELD_NUMBER: builtins.int @property def values(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___KeyValue]: """A collection of key/value pairs of key-value pairs. The list may be empty (may contain 0 elements). The keys MUST be unique (it is not allowed to have more than one value with the same key). """ def __init__( self, *, values: collections.abc.Iterable[global___KeyValue] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["values", b"values"]) -> None: ... global___KeyValueList = KeyValueList @typing_extensions.final class KeyValue(google.protobuf.message.Message): """KeyValue is a key-value pair that is used to store Span attributes, Link attributes, etc. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor KEY_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int key: builtins.str @property def value(self) -> global___AnyValue: ... def __init__( self, *, key: builtins.str = ..., value: global___AnyValue | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["value", b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ... global___KeyValue = KeyValue @typing_extensions.final class InstrumentationScope(google.protobuf.message.Message): """InstrumentationScope is a message representing the instrumentation scope information such as the fully qualified name and version. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor NAME_FIELD_NUMBER: builtins.int VERSION_FIELD_NUMBER: builtins.int ATTRIBUTES_FIELD_NUMBER: builtins.int DROPPED_ATTRIBUTES_COUNT_FIELD_NUMBER: builtins.int name: builtins.str """An empty instrumentation scope name means the name is unknown.""" version: builtins.str @property def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___KeyValue]: """Additional attributes that describe the scope. [Optional]. Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ dropped_attributes_count: builtins.int def __init__( self, *, name: builtins.str = ..., version: builtins.str = ..., attributes: collections.abc.Iterable[global___KeyValue] | None = ..., dropped_attributes_count: builtins.int = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["attributes", b"attributes", "dropped_attributes_count", b"dropped_attributes_count", "name", b"name", "version", b"version"]) -> None: ... global___InstrumentationScope = InstrumentationScope @typing_extensions.final class EntityRef(google.protobuf.message.Message): """A reference to an Entity. Entity represents an object of interest associated with produced telemetry: e.g spans, metrics, profiles, or logs. 
Status: [Development] """ DESCRIPTOR: google.protobuf.descriptor.Descriptor SCHEMA_URL_FIELD_NUMBER: builtins.int TYPE_FIELD_NUMBER: builtins.int ID_KEYS_FIELD_NUMBER: builtins.int DESCRIPTION_KEYS_FIELD_NUMBER: builtins.int schema_url: builtins.str """The Schema URL, if known. This is the identifier of the Schema that the entity data is recorded in. To learn more about Schema URL see https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This schema_url applies to the data in this message and to the Resource attributes referenced by id_keys and description_keys. TODO: discuss if we are happy with this somewhat complicated definition of what the schema_url applies to. This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs. """ type: builtins.str """Defines the type of the entity. MUST not change during the lifetime of the entity. For example: "service" or "host". This field is required and MUST not be empty for valid entities. """ @property def id_keys(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: """Attribute Keys that identify the entity. MUST not change during the lifetime of the entity. The Id must contain at least one attribute. These keys MUST exist in the containing {message}.attributes. """ @property def description_keys(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: """Descriptive (non-identifying) attribute keys of the entity. MAY change over the lifetime of the entity. MAY be empty. These attribute keys are not part of entity's identity. These keys MUST exist in the containing {message}.attributes. """ def __init__( self, *, schema_url: builtins.str = ..., type: builtins.str = ..., id_keys: collections.abc.Iterable[builtins.str] | None = ..., description_keys: collections.abc.Iterable[builtins.str] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["description_keys", b"description_keys", "id_keys", b"id_keys", "schema_url", b"schema_url", "type", b"type"]) -> None: ... global___EntityRef = EntityRef python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/logs/000077500000000000000000000000001511654350100276505ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/logs/v1/000077500000000000000000000000001511654350100301765ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/logs/v1/logs_pb2.py000066400000000000000000000111451511654350100322610ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
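# A minimal sketch of the EntityRef message documented in the stub above; the
# schema URL version and the attribute keys are illustrative values taken
# from the semantic conventions:
from opentelemetry.proto.common.v1 import common_pb2

service_entity = common_pb2.EntityRef(
    schema_url="https://opentelemetry.io/schemas/1.34.0",
    type="service",
    id_keys=["service.name", "service.instance.id"],
    description_keys=["service.version"],
)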
# source: opentelemetry/proto/logs/v1/logs.proto # Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from opentelemetry.proto.common.v1 import common_pb2 as opentelemetry_dot_proto_dot_common_dot_v1_dot_common__pb2 from opentelemetry.proto.resource.v1 import resource_pb2 as opentelemetry_dot_proto_dot_resource_dot_v1_dot_resource__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&opentelemetry/proto/logs/v1/logs.proto\x12\x1bopentelemetry.proto.logs.v1\x1a*opentelemetry/proto/common/v1/common.proto\x1a.opentelemetry/proto/resource/v1/resource.proto\"L\n\x08LogsData\x12@\n\rresource_logs\x18\x01 \x03(\x0b\x32).opentelemetry.proto.logs.v1.ResourceLogs\"\xa3\x01\n\x0cResourceLogs\x12;\n\x08resource\x18\x01 \x01(\x0b\x32).opentelemetry.proto.resource.v1.Resource\x12:\n\nscope_logs\x18\x02 \x03(\x0b\x32&.opentelemetry.proto.logs.v1.ScopeLogs\x12\x12\n\nschema_url\x18\x03 \x01(\tJ\x06\x08\xe8\x07\x10\xe9\x07\"\xa0\x01\n\tScopeLogs\x12\x42\n\x05scope\x18\x01 \x01(\x0b\x32\x33.opentelemetry.proto.common.v1.InstrumentationScope\x12;\n\x0blog_records\x18\x02 \x03(\x0b\x32&.opentelemetry.proto.logs.v1.LogRecord\x12\x12\n\nschema_url\x18\x03 \x01(\t\"\x83\x03\n\tLogRecord\x12\x16\n\x0etime_unix_nano\x18\x01 \x01(\x06\x12\x1f\n\x17observed_time_unix_nano\x18\x0b \x01(\x06\x12\x44\n\x0fseverity_number\x18\x02 \x01(\x0e\x32+.opentelemetry.proto.logs.v1.SeverityNumber\x12\x15\n\rseverity_text\x18\x03 \x01(\t\x12\x35\n\x04\x62ody\x18\x05 \x01(\x0b\x32\'.opentelemetry.proto.common.v1.AnyValue\x12;\n\nattributes\x18\x06 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12 \n\x18\x64ropped_attributes_count\x18\x07 \x01(\r\x12\r\n\x05\x66lags\x18\x08 \x01(\x07\x12\x10\n\x08trace_id\x18\t \x01(\x0c\x12\x0f\n\x07span_id\x18\n \x01(\x0c\x12\x12\n\nevent_name\x18\x0c \x01(\tJ\x04\x08\x04\x10\x05*\xc3\x05\n\x0eSeverityNumber\x12\x1f\n\x1bSEVERITY_NUMBER_UNSPECIFIED\x10\x00\x12\x19\n\x15SEVERITY_NUMBER_TRACE\x10\x01\x12\x1a\n\x16SEVERITY_NUMBER_TRACE2\x10\x02\x12\x1a\n\x16SEVERITY_NUMBER_TRACE3\x10\x03\x12\x1a\n\x16SEVERITY_NUMBER_TRACE4\x10\x04\x12\x19\n\x15SEVERITY_NUMBER_DEBUG\x10\x05\x12\x1a\n\x16SEVERITY_NUMBER_DEBUG2\x10\x06\x12\x1a\n\x16SEVERITY_NUMBER_DEBUG3\x10\x07\x12\x1a\n\x16SEVERITY_NUMBER_DEBUG4\x10\x08\x12\x18\n\x14SEVERITY_NUMBER_INFO\x10\t\x12\x19\n\x15SEVERITY_NUMBER_INFO2\x10\n\x12\x19\n\x15SEVERITY_NUMBER_INFO3\x10\x0b\x12\x19\n\x15SEVERITY_NUMBER_INFO4\x10\x0c\x12\x18\n\x14SEVERITY_NUMBER_WARN\x10\r\x12\x19\n\x15SEVERITY_NUMBER_WARN2\x10\x0e\x12\x19\n\x15SEVERITY_NUMBER_WARN3\x10\x0f\x12\x19\n\x15SEVERITY_NUMBER_WARN4\x10\x10\x12\x19\n\x15SEVERITY_NUMBER_ERROR\x10\x11\x12\x1a\n\x16SEVERITY_NUMBER_ERROR2\x10\x12\x12\x1a\n\x16SEVERITY_NUMBER_ERROR3\x10\x13\x12\x1a\n\x16SEVERITY_NUMBER_ERROR4\x10\x14\x12\x19\n\x15SEVERITY_NUMBER_FATAL\x10\x15\x12\x1a\n\x16SEVERITY_NUMBER_FATAL2\x10\x16\x12\x1a\n\x16SEVERITY_NUMBER_FATAL3\x10\x17\x12\x1a\n\x16SEVERITY_NUMBER_FATAL4\x10\x18*Y\n\x0eLogRecordFlags\x12\x1f\n\x1bLOG_RECORD_FLAGS_DO_NOT_USE\x10\x00\x12&\n!LOG_RECORD_FLAGS_TRACE_FLAGS_MASK\x10\xff\x01\x42s\n\x1eio.opentelemetry.proto.logs.v1B\tLogsProtoP\x01Z&go.opentelemetry.io/proto/otlp/logs/v1\xaa\x02\x1bOpenTelemetry.Proto.Logs.V1b\x06proto3') _globals 
= globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.proto.logs.v1.logs_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n\036io.opentelemetry.proto.logs.v1B\tLogsProtoP\001Z&go.opentelemetry.io/proto/otlp/logs/v1\252\002\033OpenTelemetry.Proto.Logs.V1' _globals['_SEVERITYNUMBER']._serialized_start=961 _globals['_SEVERITYNUMBER']._serialized_end=1668 _globals['_LOGRECORDFLAGS']._serialized_start=1670 _globals['_LOGRECORDFLAGS']._serialized_end=1759 _globals['_LOGSDATA']._serialized_start=163 _globals['_LOGSDATA']._serialized_end=239 _globals['_RESOURCELOGS']._serialized_start=242 _globals['_RESOURCELOGS']._serialized_end=405 _globals['_SCOPELOGS']._serialized_start=408 _globals['_SCOPELOGS']._serialized_end=568 _globals['_LOGRECORD']._serialized_start=571 _globals['_LOGRECORD']._serialized_end=958 # @@protoc_insertion_point(module_scope) python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/logs/v1/logs_pb2.pyi000066400000000000000000000421561511654350100324400ustar00rootroot00000000000000""" @generated by mypy-protobuf. Do not edit manually! isort:skip_file Copyright 2020, OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import builtins import collections.abc import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.internal.enum_type_wrapper import google.protobuf.message import opentelemetry.proto.common.v1.common_pb2 import opentelemetry.proto.resource.v1.resource_pb2 import sys import typing if sys.version_info >= (3, 10): import typing as typing_extensions else: import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor class _SeverityNumber: ValueType = typing.NewType("ValueType", builtins.int) V: typing_extensions.TypeAlias = ValueType class _SeverityNumberEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_SeverityNumber.ValueType], builtins.type): DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor SEVERITY_NUMBER_UNSPECIFIED: _SeverityNumber.ValueType # 0 """UNSPECIFIED is the default SeverityNumber, it MUST NOT be used.""" SEVERITY_NUMBER_TRACE: _SeverityNumber.ValueType # 1 SEVERITY_NUMBER_TRACE2: _SeverityNumber.ValueType # 2 SEVERITY_NUMBER_TRACE3: _SeverityNumber.ValueType # 3 SEVERITY_NUMBER_TRACE4: _SeverityNumber.ValueType # 4 SEVERITY_NUMBER_DEBUG: _SeverityNumber.ValueType # 5 SEVERITY_NUMBER_DEBUG2: _SeverityNumber.ValueType # 6 SEVERITY_NUMBER_DEBUG3: _SeverityNumber.ValueType # 7 SEVERITY_NUMBER_DEBUG4: _SeverityNumber.ValueType # 8 SEVERITY_NUMBER_INFO: _SeverityNumber.ValueType # 9 SEVERITY_NUMBER_INFO2: _SeverityNumber.ValueType # 10 SEVERITY_NUMBER_INFO3: _SeverityNumber.ValueType # 11 SEVERITY_NUMBER_INFO4: _SeverityNumber.ValueType # 12 SEVERITY_NUMBER_WARN: _SeverityNumber.ValueType # 13 SEVERITY_NUMBER_WARN2: _SeverityNumber.ValueType # 14 SEVERITY_NUMBER_WARN3: _SeverityNumber.ValueType # 15 SEVERITY_NUMBER_WARN4: _SeverityNumber.ValueType # 16 SEVERITY_NUMBER_ERROR: _SeverityNumber.ValueType # 17 SEVERITY_NUMBER_ERROR2: _SeverityNumber.ValueType # 18 SEVERITY_NUMBER_ERROR3: _SeverityNumber.ValueType # 19 SEVERITY_NUMBER_ERROR4: _SeverityNumber.ValueType # 20 SEVERITY_NUMBER_FATAL: _SeverityNumber.ValueType # 21 SEVERITY_NUMBER_FATAL2: _SeverityNumber.ValueType # 22 SEVERITY_NUMBER_FATAL3: _SeverityNumber.ValueType # 23 SEVERITY_NUMBER_FATAL4: _SeverityNumber.ValueType # 24 class SeverityNumber(_SeverityNumber, metaclass=_SeverityNumberEnumTypeWrapper): """Possible values for LogRecord.SeverityNumber.""" SEVERITY_NUMBER_UNSPECIFIED: SeverityNumber.ValueType # 0 """UNSPECIFIED is the default SeverityNumber, it MUST NOT be used.""" SEVERITY_NUMBER_TRACE: SeverityNumber.ValueType # 1 SEVERITY_NUMBER_TRACE2: SeverityNumber.ValueType # 2 SEVERITY_NUMBER_TRACE3: SeverityNumber.ValueType # 3 SEVERITY_NUMBER_TRACE4: SeverityNumber.ValueType # 4 SEVERITY_NUMBER_DEBUG: SeverityNumber.ValueType # 5 SEVERITY_NUMBER_DEBUG2: SeverityNumber.ValueType # 6 SEVERITY_NUMBER_DEBUG3: SeverityNumber.ValueType # 7 SEVERITY_NUMBER_DEBUG4: SeverityNumber.ValueType # 8 SEVERITY_NUMBER_INFO: SeverityNumber.ValueType # 9 SEVERITY_NUMBER_INFO2: SeverityNumber.ValueType # 10 SEVERITY_NUMBER_INFO3: SeverityNumber.ValueType # 11 SEVERITY_NUMBER_INFO4: SeverityNumber.ValueType # 12 SEVERITY_NUMBER_WARN: SeverityNumber.ValueType # 13 SEVERITY_NUMBER_WARN2: SeverityNumber.ValueType # 14 SEVERITY_NUMBER_WARN3: SeverityNumber.ValueType # 15 SEVERITY_NUMBER_WARN4: SeverityNumber.ValueType # 16 SEVERITY_NUMBER_ERROR: SeverityNumber.ValueType # 17 SEVERITY_NUMBER_ERROR2: SeverityNumber.ValueType # 18 SEVERITY_NUMBER_ERROR3: SeverityNumber.ValueType # 19 SEVERITY_NUMBER_ERROR4: SeverityNumber.ValueType # 20 
SEVERITY_NUMBER_FATAL: SeverityNumber.ValueType # 21 SEVERITY_NUMBER_FATAL2: SeverityNumber.ValueType # 22 SEVERITY_NUMBER_FATAL3: SeverityNumber.ValueType # 23 SEVERITY_NUMBER_FATAL4: SeverityNumber.ValueType # 24 global___SeverityNumber = SeverityNumber class _LogRecordFlags: ValueType = typing.NewType("ValueType", builtins.int) V: typing_extensions.TypeAlias = ValueType class _LogRecordFlagsEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_LogRecordFlags.ValueType], builtins.type): DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor LOG_RECORD_FLAGS_DO_NOT_USE: _LogRecordFlags.ValueType # 0 """The zero value for the enum. Should not be used for comparisons. Instead use bitwise "and" with the appropriate mask as shown above. """ LOG_RECORD_FLAGS_TRACE_FLAGS_MASK: _LogRecordFlags.ValueType # 255 """Bits 0-7 are used for trace flags.""" class LogRecordFlags(_LogRecordFlags, metaclass=_LogRecordFlagsEnumTypeWrapper): """LogRecordFlags represents constants used to interpret the LogRecord.flags field, which is protobuf 'fixed32' type and is to be used as bit-fields. Each non-zero value defined in this enum is a bit-mask. To extract the bit-field, for example, use an expression like: (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) """ LOG_RECORD_FLAGS_DO_NOT_USE: LogRecordFlags.ValueType # 0 """The zero value for the enum. Should not be used for comparisons. Instead use bitwise "and" with the appropriate mask as shown above. """ LOG_RECORD_FLAGS_TRACE_FLAGS_MASK: LogRecordFlags.ValueType # 255 """Bits 0-7 are used for trace flags.""" global___LogRecordFlags = LogRecordFlags @typing_extensions.final class LogsData(google.protobuf.message.Message): """LogsData represents the logs data that can be stored in a persistent storage, OR can be embedded by other protocols that transfer OTLP logs data but do not implement the OTLP protocol. The main difference between this message and collector protocol is that in this message there will not be any "control" or "metadata" specific to OTLP protocol. When new fields are added into this message, the OTLP request MUST be updated as well. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_LOGS_FIELD_NUMBER: builtins.int @property def resource_logs(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ResourceLogs]: """An array of ResourceLogs. For data coming from a single resource this array will typically contain one element. Intermediary nodes that receive data from multiple origins typically batch the data before forwarding further and in that case this array will contain multiple elements. """ def __init__( self, *, resource_logs: collections.abc.Iterable[global___ResourceLogs] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["resource_logs", b"resource_logs"]) -> None: ... global___LogsData = LogsData @typing_extensions.final class ResourceLogs(google.protobuf.message.Message): """A collection of ScopeLogs from a Resource.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_FIELD_NUMBER: builtins.int SCOPE_LOGS_FIELD_NUMBER: builtins.int SCHEMA_URL_FIELD_NUMBER: builtins.int @property def resource(self) -> opentelemetry.proto.resource.v1.resource_pb2.Resource: """The resource for the logs in this message. If this field is not set then resource info is unknown. 
""" @property def scope_logs(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ScopeLogs]: """A list of ScopeLogs that originate from a resource.""" schema_url: builtins.str """The Schema URL, if known. This is the identifier of the Schema that the resource data is recorded in. Notably, the last part of the URL path is the version number of the schema: http[s]://server[:port]/path/. To learn more about Schema URL see https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This schema_url applies to the data in the "resource" field. It does not apply to the data in the "scope_logs" field which have their own schema_url field. """ def __init__( self, *, resource: opentelemetry.proto.resource.v1.resource_pb2.Resource | None = ..., scope_logs: collections.abc.Iterable[global___ScopeLogs] | None = ..., schema_url: builtins.str = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["resource", b"resource"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["resource", b"resource", "schema_url", b"schema_url", "scope_logs", b"scope_logs"]) -> None: ... global___ResourceLogs = ResourceLogs @typing_extensions.final class ScopeLogs(google.protobuf.message.Message): """A collection of Logs produced by a Scope.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor SCOPE_FIELD_NUMBER: builtins.int LOG_RECORDS_FIELD_NUMBER: builtins.int SCHEMA_URL_FIELD_NUMBER: builtins.int @property def scope(self) -> opentelemetry.proto.common.v1.common_pb2.InstrumentationScope: """The instrumentation scope information for the logs in this message. Semantically when InstrumentationScope isn't set, it is equivalent with an empty instrumentation scope name (unknown). """ @property def log_records(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___LogRecord]: """A list of log records.""" schema_url: builtins.str """The Schema URL, if known. This is the identifier of the Schema that the log data is recorded in. Notably, the last part of the URL path is the version number of the schema: http[s]://server[:port]/path/. To learn more about Schema URL see https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This schema_url applies to all logs in the "logs" field. """ def __init__( self, *, scope: opentelemetry.proto.common.v1.common_pb2.InstrumentationScope | None = ..., log_records: collections.abc.Iterable[global___LogRecord] | None = ..., schema_url: builtins.str = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["scope", b"scope"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["log_records", b"log_records", "schema_url", b"schema_url", "scope", b"scope"]) -> None: ... 
global___ScopeLogs = ScopeLogs @typing_extensions.final class LogRecord(google.protobuf.message.Message): """A log record according to OpenTelemetry Log Data Model: https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md """ DESCRIPTOR: google.protobuf.descriptor.Descriptor TIME_UNIX_NANO_FIELD_NUMBER: builtins.int OBSERVED_TIME_UNIX_NANO_FIELD_NUMBER: builtins.int SEVERITY_NUMBER_FIELD_NUMBER: builtins.int SEVERITY_TEXT_FIELD_NUMBER: builtins.int BODY_FIELD_NUMBER: builtins.int ATTRIBUTES_FIELD_NUMBER: builtins.int DROPPED_ATTRIBUTES_COUNT_FIELD_NUMBER: builtins.int FLAGS_FIELD_NUMBER: builtins.int TRACE_ID_FIELD_NUMBER: builtins.int SPAN_ID_FIELD_NUMBER: builtins.int EVENT_NAME_FIELD_NUMBER: builtins.int time_unix_nano: builtins.int """time_unix_nano is the time when the event occurred. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. Value of 0 indicates unknown or missing timestamp. """ observed_time_unix_nano: builtins.int """Time when the event was observed by the collection system. For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) this timestamp is typically set at the generation time and is equal to Timestamp. For events originating externally and collected by OpenTelemetry (e.g. using Collector) this is the time when OpenTelemetry's code observed the event measured by the clock of the OpenTelemetry code. This field MUST be set once the event is observed by OpenTelemetry. For converting OpenTelemetry log data to formats that support only one timestamp or when receiving OpenTelemetry log data by recipients that support only one timestamp internally the following logic is recommended: - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. Value of 0 indicates unknown or missing timestamp. """ severity_number: global___SeverityNumber.ValueType """Numerical value of the severity, normalized to values described in Log Data Model. [Optional]. """ severity_text: builtins.str """The severity text (also known as log level). The original string representation as it is known at the source. [Optional]. """ @property def body(self) -> opentelemetry.proto.common.v1.common_pb2.AnyValue: """A value containing the body of the log record. Can be for example a human-readable string message (including multi-line) describing the event in a free form or it can be a structured data composed of arrays and maps of other values. [Optional]. """ @property def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """Additional attributes that describe the specific event occurrence. [Optional]. Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ dropped_attributes_count: builtins.int flags: builtins.int """Flags, a bit field. 8 least significant bits are the trace flags as defined in W3C Trace Context specification. 24 most significant bits are reserved and must be set to 0. Readers must not assume that 24 most significant bits will be zero and must correctly mask the bits when reading 8-bit trace flag (use flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. """ trace_id: builtins.bytes """A unique identifier for a trace. All logs from the same trace share the same `trace_id`. The ID is a 16-byte array. 
An ID with all zeroes OR of length other than 16 bytes is considered invalid (empty string in OTLP/JSON is zero-length and thus is also invalid). This field is optional. The receivers SHOULD assume that the log record is not associated with a trace if any of the following is true: - the field is not present, - the field contains an invalid value. """ span_id: builtins.bytes """A unique identifier for a span within a trace, assigned when the span is created. The ID is an 8-byte array. An ID with all zeroes OR of length other than 8 bytes is considered invalid (empty string in OTLP/JSON is zero-length and thus is also invalid). This field is optional. If the sender specifies a valid span_id then it SHOULD also specify a valid trace_id. The receivers SHOULD assume that the log record is not associated with a span if any of the following is true: - the field is not present, - the field contains an invalid value. """ event_name: builtins.str """A unique identifier of event category/type. All events with the same event_name are expected to conform to the same schema for both their attributes and their body. Recommended to be fully qualified and short (no longer than 256 characters). Presence of event_name on the log record identifies this record as an event. [Optional]. """ def __init__( self, *, time_unix_nano: builtins.int = ..., observed_time_unix_nano: builtins.int = ..., severity_number: global___SeverityNumber.ValueType = ..., severity_text: builtins.str = ..., body: opentelemetry.proto.common.v1.common_pb2.AnyValue | None = ..., attributes: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., dropped_attributes_count: builtins.int = ..., flags: builtins.int = ..., trace_id: builtins.bytes = ..., span_id: builtins.bytes = ..., event_name: builtins.str = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["body", b"body"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["attributes", b"attributes", "body", b"body", "dropped_attributes_count", b"dropped_attributes_count", "event_name", b"event_name", "flags", b"flags", "observed_time_unix_nano", b"observed_time_unix_nano", "severity_number", b"severity_number", "severity_text", b"severity_text", "span_id", b"span_id", "time_unix_nano", b"time_unix_nano", "trace_id", b"trace_id"]) -> None: ... global___LogRecord = LogRecord python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/metrics/000077500000000000000000000000001511654350100303525ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/metrics/__init__.py000066400000000000000000000000001511654350100324510ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/metrics/v1/000077500000000000000000000000001511654350100307005ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/metrics/v1/__init__.py000066400000000000000000000000001511654350100327770ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/metrics/v1/metrics_pb2.py000066400000000000000000000223741511654350100334730ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
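# A minimal sketch of a cumulative, monotonic counter encoded with the
# messages this module defines (the metric name, unit, and values are
# illustrative):
#
#     import time
#     from opentelemetry.proto.metrics.v1 import metrics_pb2
#
#     point = metrics_pb2.NumberDataPoint(
#         start_time_unix_nano=0,
#         time_unix_nano=time.time_ns(),
#         as_int=17,
#     )
#     metric = metrics_pb2.Metric(
#         name="http.server.request.count",
#         unit="{request}",
#         sum=metrics_pb2.Sum(
#             data_points=[point],
#             aggregation_temporality=(
#                 metrics_pb2.AGGREGATION_TEMPORALITY_CUMULATIVE
#             ),
#             is_monotonic=True,
#         ),
#     )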
# source: opentelemetry/proto/metrics/v1/metrics.proto # Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from opentelemetry.proto.common.v1 import common_pb2 as opentelemetry_dot_proto_dot_common_dot_v1_dot_common__pb2 from opentelemetry.proto.resource.v1 import resource_pb2 as opentelemetry_dot_proto_dot_resource_dot_v1_dot_resource__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n,opentelemetry/proto/metrics/v1/metrics.proto\x12\x1eopentelemetry.proto.metrics.v1\x1a*opentelemetry/proto/common/v1/common.proto\x1a.opentelemetry/proto/resource/v1/resource.proto\"X\n\x0bMetricsData\x12I\n\x10resource_metrics\x18\x01 \x03(\x0b\x32/.opentelemetry.proto.metrics.v1.ResourceMetrics\"\xaf\x01\n\x0fResourceMetrics\x12;\n\x08resource\x18\x01 \x01(\x0b\x32).opentelemetry.proto.resource.v1.Resource\x12\x43\n\rscope_metrics\x18\x02 \x03(\x0b\x32,.opentelemetry.proto.metrics.v1.ScopeMetrics\x12\x12\n\nschema_url\x18\x03 \x01(\tJ\x06\x08\xe8\x07\x10\xe9\x07\"\x9f\x01\n\x0cScopeMetrics\x12\x42\n\x05scope\x18\x01 \x01(\x0b\x32\x33.opentelemetry.proto.common.v1.InstrumentationScope\x12\x37\n\x07metrics\x18\x02 \x03(\x0b\x32&.opentelemetry.proto.metrics.v1.Metric\x12\x12\n\nschema_url\x18\x03 \x01(\t\"\xcd\x03\n\x06Metric\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0c\n\x04unit\x18\x03 \x01(\t\x12\x36\n\x05gauge\x18\x05 \x01(\x0b\x32%.opentelemetry.proto.metrics.v1.GaugeH\x00\x12\x32\n\x03sum\x18\x07 \x01(\x0b\x32#.opentelemetry.proto.metrics.v1.SumH\x00\x12>\n\thistogram\x18\t \x01(\x0b\x32).opentelemetry.proto.metrics.v1.HistogramH\x00\x12U\n\x15\x65xponential_histogram\x18\n \x01(\x0b\x32\x34.opentelemetry.proto.metrics.v1.ExponentialHistogramH\x00\x12:\n\x07summary\x18\x0b \x01(\x0b\x32\'.opentelemetry.proto.metrics.v1.SummaryH\x00\x12\x39\n\x08metadata\x18\x0c \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValueB\x06\n\x04\x64\x61taJ\x04\x08\x04\x10\x05J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\t\"M\n\x05Gauge\x12\x44\n\x0b\x64\x61ta_points\x18\x01 \x03(\x0b\x32/.opentelemetry.proto.metrics.v1.NumberDataPoint\"\xba\x01\n\x03Sum\x12\x44\n\x0b\x64\x61ta_points\x18\x01 \x03(\x0b\x32/.opentelemetry.proto.metrics.v1.NumberDataPoint\x12W\n\x17\x61ggregation_temporality\x18\x02 \x01(\x0e\x32\x36.opentelemetry.proto.metrics.v1.AggregationTemporality\x12\x14\n\x0cis_monotonic\x18\x03 \x01(\x08\"\xad\x01\n\tHistogram\x12G\n\x0b\x64\x61ta_points\x18\x01 \x03(\x0b\x32\x32.opentelemetry.proto.metrics.v1.HistogramDataPoint\x12W\n\x17\x61ggregation_temporality\x18\x02 \x01(\x0e\x32\x36.opentelemetry.proto.metrics.v1.AggregationTemporality\"\xc3\x01\n\x14\x45xponentialHistogram\x12R\n\x0b\x64\x61ta_points\x18\x01 \x03(\x0b\x32=.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint\x12W\n\x17\x61ggregation_temporality\x18\x02 \x01(\x0e\x32\x36.opentelemetry.proto.metrics.v1.AggregationTemporality\"P\n\x07Summary\x12\x45\n\x0b\x64\x61ta_points\x18\x01 \x03(\x0b\x32\x30.opentelemetry.proto.metrics.v1.SummaryDataPoint\"\x86\x02\n\x0fNumberDataPoint\x12;\n\nattributes\x18\x07 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12\x1c\n\x14start_time_unix_nano\x18\x02 \x01(\x06\x12\x16\n\x0etime_unix_nano\x18\x03 
\x01(\x06\x12\x13\n\tas_double\x18\x04 \x01(\x01H\x00\x12\x10\n\x06\x61s_int\x18\x06 \x01(\x10H\x00\x12;\n\texemplars\x18\x05 \x03(\x0b\x32(.opentelemetry.proto.metrics.v1.Exemplar\x12\r\n\x05\x66lags\x18\x08 \x01(\rB\x07\n\x05valueJ\x04\x08\x01\x10\x02\"\xe6\x02\n\x12HistogramDataPoint\x12;\n\nattributes\x18\t \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12\x1c\n\x14start_time_unix_nano\x18\x02 \x01(\x06\x12\x16\n\x0etime_unix_nano\x18\x03 \x01(\x06\x12\r\n\x05\x63ount\x18\x04 \x01(\x06\x12\x10\n\x03sum\x18\x05 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\rbucket_counts\x18\x06 \x03(\x06\x12\x17\n\x0f\x65xplicit_bounds\x18\x07 \x03(\x01\x12;\n\texemplars\x18\x08 \x03(\x0b\x32(.opentelemetry.proto.metrics.v1.Exemplar\x12\r\n\x05\x66lags\x18\n \x01(\r\x12\x10\n\x03min\x18\x0b \x01(\x01H\x01\x88\x01\x01\x12\x10\n\x03max\x18\x0c \x01(\x01H\x02\x88\x01\x01\x42\x06\n\x04_sumB\x06\n\x04_minB\x06\n\x04_maxJ\x04\x08\x01\x10\x02\"\xda\x04\n\x1d\x45xponentialHistogramDataPoint\x12;\n\nattributes\x18\x01 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12\x1c\n\x14start_time_unix_nano\x18\x02 \x01(\x06\x12\x16\n\x0etime_unix_nano\x18\x03 \x01(\x06\x12\r\n\x05\x63ount\x18\x04 \x01(\x06\x12\x10\n\x03sum\x18\x05 \x01(\x01H\x00\x88\x01\x01\x12\r\n\x05scale\x18\x06 \x01(\x11\x12\x12\n\nzero_count\x18\x07 \x01(\x06\x12W\n\x08positive\x18\x08 \x01(\x0b\x32\x45.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets\x12W\n\x08negative\x18\t \x01(\x0b\x32\x45.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets\x12\r\n\x05\x66lags\x18\n \x01(\r\x12;\n\texemplars\x18\x0b \x03(\x0b\x32(.opentelemetry.proto.metrics.v1.Exemplar\x12\x10\n\x03min\x18\x0c \x01(\x01H\x01\x88\x01\x01\x12\x10\n\x03max\x18\r \x01(\x01H\x02\x88\x01\x01\x12\x16\n\x0ezero_threshold\x18\x0e \x01(\x01\x1a\x30\n\x07\x42uckets\x12\x0e\n\x06offset\x18\x01 \x01(\x11\x12\x15\n\rbucket_counts\x18\x02 \x03(\x04\x42\x06\n\x04_sumB\x06\n\x04_minB\x06\n\x04_max\"\xc5\x02\n\x10SummaryDataPoint\x12;\n\nattributes\x18\x07 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12\x1c\n\x14start_time_unix_nano\x18\x02 \x01(\x06\x12\x16\n\x0etime_unix_nano\x18\x03 \x01(\x06\x12\r\n\x05\x63ount\x18\x04 \x01(\x06\x12\x0b\n\x03sum\x18\x05 \x01(\x01\x12Y\n\x0fquantile_values\x18\x06 \x03(\x0b\x32@.opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile\x12\r\n\x05\x66lags\x18\x08 \x01(\r\x1a\x32\n\x0fValueAtQuantile\x12\x10\n\x08quantile\x18\x01 \x01(\x01\x12\r\n\x05value\x18\x02 \x01(\x01J\x04\x08\x01\x10\x02\"\xc1\x01\n\x08\x45xemplar\x12\x44\n\x13\x66iltered_attributes\x18\x07 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12\x16\n\x0etime_unix_nano\x18\x02 \x01(\x06\x12\x13\n\tas_double\x18\x03 \x01(\x01H\x00\x12\x10\n\x06\x61s_int\x18\x06 \x01(\x10H\x00\x12\x0f\n\x07span_id\x18\x04 \x01(\x0c\x12\x10\n\x08trace_id\x18\x05 \x01(\x0c\x42\x07\n\x05valueJ\x04\x08\x01\x10\x02*\x8c\x01\n\x16\x41ggregationTemporality\x12\'\n#AGGREGATION_TEMPORALITY_UNSPECIFIED\x10\x00\x12!\n\x1d\x41GGREGATION_TEMPORALITY_DELTA\x10\x01\x12&\n\"AGGREGATION_TEMPORALITY_CUMULATIVE\x10\x02*^\n\x0e\x44\x61taPointFlags\x12\x1f\n\x1b\x44\x41TA_POINT_FLAGS_DO_NOT_USE\x10\x00\x12+\n\'DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK\x10\x01\x42\x7f\n!io.opentelemetry.proto.metrics.v1B\x0cMetricsProtoP\x01Z)go.opentelemetry.io/proto/otlp/metrics/v1\xaa\x02\x1eOpenTelemetry.Proto.Metrics.V1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 
'opentelemetry.proto.metrics.v1.metrics_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n!io.opentelemetry.proto.metrics.v1B\014MetricsProtoP\001Z)go.opentelemetry.io/proto/otlp/metrics/v1\252\002\036OpenTelemetry.Proto.Metrics.V1' _globals['_AGGREGATIONTEMPORALITY']._serialized_start=3546 _globals['_AGGREGATIONTEMPORALITY']._serialized_end=3686 _globals['_DATAPOINTFLAGS']._serialized_start=3688 _globals['_DATAPOINTFLAGS']._serialized_end=3782 _globals['_METRICSDATA']._serialized_start=172 _globals['_METRICSDATA']._serialized_end=260 _globals['_RESOURCEMETRICS']._serialized_start=263 _globals['_RESOURCEMETRICS']._serialized_end=438 _globals['_SCOPEMETRICS']._serialized_start=441 _globals['_SCOPEMETRICS']._serialized_end=600 _globals['_METRIC']._serialized_start=603 _globals['_METRIC']._serialized_end=1064 _globals['_GAUGE']._serialized_start=1066 _globals['_GAUGE']._serialized_end=1143 _globals['_SUM']._serialized_start=1146 _globals['_SUM']._serialized_end=1332 _globals['_HISTOGRAM']._serialized_start=1335 _globals['_HISTOGRAM']._serialized_end=1508 _globals['_EXPONENTIALHISTOGRAM']._serialized_start=1511 _globals['_EXPONENTIALHISTOGRAM']._serialized_end=1706 _globals['_SUMMARY']._serialized_start=1708 _globals['_SUMMARY']._serialized_end=1788 _globals['_NUMBERDATAPOINT']._serialized_start=1791 _globals['_NUMBERDATAPOINT']._serialized_end=2053 _globals['_HISTOGRAMDATAPOINT']._serialized_start=2056 _globals['_HISTOGRAMDATAPOINT']._serialized_end=2414 _globals['_EXPONENTIALHISTOGRAMDATAPOINT']._serialized_start=2417 _globals['_EXPONENTIALHISTOGRAMDATAPOINT']._serialized_end=3019 _globals['_EXPONENTIALHISTOGRAMDATAPOINT_BUCKETS']._serialized_start=2947 _globals['_EXPONENTIALHISTOGRAMDATAPOINT_BUCKETS']._serialized_end=2995 _globals['_SUMMARYDATAPOINT']._serialized_start=3022 _globals['_SUMMARYDATAPOINT']._serialized_end=3347 _globals['_SUMMARYDATAPOINT_VALUEATQUANTILE']._serialized_start=3291 _globals['_SUMMARYDATAPOINT_VALUEATQUANTILE']._serialized_end=3341 _globals['_EXEMPLAR']._serialized_start=3350 _globals['_EXEMPLAR']._serialized_end=3543 # @@protoc_insertion_point(module_scope) python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/metrics/v1/metrics_pb2.pyi000066400000000000000000001545601511654350100336470ustar00rootroot00000000000000""" @generated by mypy-protobuf. Do not edit manually! isort:skip_file Copyright 2019, OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import builtins import collections.abc import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.internal.enum_type_wrapper import google.protobuf.message import opentelemetry.proto.common.v1.common_pb2 import opentelemetry.proto.resource.v1.resource_pb2 import sys import typing if sys.version_info >= (3, 10): import typing as typing_extensions else: import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor class _AggregationTemporality: ValueType = typing.NewType("ValueType", builtins.int) V: typing_extensions.TypeAlias = ValueType class _AggregationTemporalityEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_AggregationTemporality.ValueType], builtins.type): DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor AGGREGATION_TEMPORALITY_UNSPECIFIED: _AggregationTemporality.ValueType # 0 """UNSPECIFIED is the default AggregationTemporality, it MUST not be used.""" AGGREGATION_TEMPORALITY_DELTA: _AggregationTemporality.ValueType # 1 """DELTA is an AggregationTemporality for a metric aggregator which reports changes since last report time. Successive metrics contain aggregation of values from continuous and non-overlapping intervals. The values for a DELTA metric are based only on the time interval associated with one measurement cycle. There is no dependency on previous measurements like is the case for CUMULATIVE metrics. For example, consider a system measuring the number of requests that it receives and reports the sum of these requests every second as a DELTA metric: 1. The system starts receiving at time=t_0. 2. A request is received, the system measures 1 request. 3. A request is received, the system measures 1 request. 4. A request is received, the system measures 1 request. 5. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0 to t_0+1 with a value of 3. 6. A request is received, the system measures 1 request. 7. A request is received, the system measures 1 request. 8. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0+1 to t_0+2 with a value of 2. """ AGGREGATION_TEMPORALITY_CUMULATIVE: _AggregationTemporality.ValueType # 2 """CUMULATIVE is an AggregationTemporality for a metric aggregator which reports changes since a fixed start time. This means that current values of a CUMULATIVE metric depend on all previous measurements since the start time. Because of this, the sender is required to retain this state in some form. If this state is lost or invalidated, the CUMULATIVE metric values MUST be reset and a new fixed start time following the last reported measurement time sent MUST be used. For example, consider a system measuring the number of requests that it receives and reports the sum of these requests every second as a CUMULATIVE metric: 1. The system starts receiving at time=t_0. 2. A request is received, the system measures 1 request. 3. A request is received, the system measures 1 request. 4. A request is received, the system measures 1 request. 5. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0 to t_0+1 with a value of 3. 6. A request is received, the system measures 1 request. 7. A request is received, the system measures 1 request. 8. The 1 second collection cycle ends. 
A metric is exported for the number of requests received over the interval of time t_0 to t_0+2 with a value of 5. 9. The system experiences a fault and loses state. 10. The system recovers and resumes receiving at time=t_1. 11. A request is received, the system measures 1 request. 12. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_1 to t_0+1 with a value of 1. Note: Even though, when reporting changes since last report time, using CUMULATIVE is valid, it is not recommended. This may cause problems for systems that do not use start_time to determine when the aggregation value was reset (e.g. Prometheus). """ class AggregationTemporality(_AggregationTemporality, metaclass=_AggregationTemporalityEnumTypeWrapper): """AggregationTemporality defines how a metric aggregator reports aggregated values. It describes how those values relate to the time interval over which they are aggregated. """ AGGREGATION_TEMPORALITY_UNSPECIFIED: AggregationTemporality.ValueType # 0 """UNSPECIFIED is the default AggregationTemporality, it MUST not be used.""" AGGREGATION_TEMPORALITY_DELTA: AggregationTemporality.ValueType # 1 """DELTA is an AggregationTemporality for a metric aggregator which reports changes since last report time. Successive metrics contain aggregation of values from continuous and non-overlapping intervals. The values for a DELTA metric are based only on the time interval associated with one measurement cycle. There is no dependency on previous measurements like is the case for CUMULATIVE metrics. For example, consider a system measuring the number of requests that it receives and reports the sum of these requests every second as a DELTA metric: 1. The system starts receiving at time=t_0. 2. A request is received, the system measures 1 request. 3. A request is received, the system measures 1 request. 4. A request is received, the system measures 1 request. 5. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0 to t_0+1 with a value of 3. 6. A request is received, the system measures 1 request. 7. A request is received, the system measures 1 request. 8. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0+1 to t_0+2 with a value of 2. """ AGGREGATION_TEMPORALITY_CUMULATIVE: AggregationTemporality.ValueType # 2 """CUMULATIVE is an AggregationTemporality for a metric aggregator which reports changes since a fixed start time. This means that current values of a CUMULATIVE metric depend on all previous measurements since the start time. Because of this, the sender is required to retain this state in some form. If this state is lost or invalidated, the CUMULATIVE metric values MUST be reset and a new fixed start time following the last reported measurement time sent MUST be used. For example, consider a system measuring the number of requests that it receives and reports the sum of these requests every second as a CUMULATIVE metric: 1. The system starts receiving at time=t_0. 2. A request is received, the system measures 1 request. 3. A request is received, the system measures 1 request. 4. A request is received, the system measures 1 request. 5. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0 to t_0+1 with a value of 3. 6. A request is received, the system measures 1 request. 7. 
A request is received, the system measures 1 request. 8. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0 to t_0+2 with a value of 5. 9. The system experiences a fault and loses state. 10. The system recovers and resumes receiving at time=t_1. 11. A request is received, the system measures 1 request. 12. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_1 to t_0+1 with a value of 1. Note: Even though, when reporting changes since last report time, using CUMULATIVE is valid, it is not recommended. This may cause problems for systems that do not use start_time to determine when the aggregation value was reset (e.g. Prometheus). """ global___AggregationTemporality = AggregationTemporality class _DataPointFlags: ValueType = typing.NewType("ValueType", builtins.int) V: typing_extensions.TypeAlias = ValueType class _DataPointFlagsEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_DataPointFlags.ValueType], builtins.type): DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor DATA_POINT_FLAGS_DO_NOT_USE: _DataPointFlags.ValueType # 0 """The zero value for the enum. Should not be used for comparisons. Instead use bitwise "and" with the appropriate mask as shown above. """ DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK: _DataPointFlags.ValueType # 1 """This DataPoint is valid but has no recorded value. This value SHOULD be used to reflect explicitly missing data in a series, as for an equivalent to the Prometheus "staleness marker". """ class DataPointFlags(_DataPointFlags, metaclass=_DataPointFlagsEnumTypeWrapper): """DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a bit-field representing 32 distinct boolean flags. Each flag defined in this enum is a bit-mask. To test the presence of a single flag in the flags of a data point, for example, use an expression like: (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK """ DATA_POINT_FLAGS_DO_NOT_USE: DataPointFlags.ValueType # 0 """The zero value for the enum. Should not be used for comparisons. Instead use bitwise "and" with the appropriate mask as shown above. """ DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK: DataPointFlags.ValueType # 1 """This DataPoint is valid but has no recorded value. This value SHOULD be used to reflect explicitly missing data in a series, as for an equivalent to the Prometheus "staleness marker". """ global___DataPointFlags = DataPointFlags @typing_extensions.final class MetricsData(google.protobuf.message.Message): """MetricsData represents the metrics data that can be stored in a persistent storage, OR can be embedded by other protocols that transfer OTLP metrics data but do not implement the OTLP protocol. MetricsData └─── ResourceMetrics ├── Resource ├── SchemaURL └── ScopeMetrics ├── Scope ├── SchemaURL └── Metric ├── Name ├── Description ├── Unit └── data ├── Gauge ├── Sum ├── Histogram ├── ExponentialHistogram └── Summary The main difference between this message and collector protocol is that in this message there will not be any "control" or "metadata" specific to OTLP protocol. When new fields are added into this message, the OTLP request MUST be updated as well. 
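For illustration only (a sketch, not part of the generated schema, assuming
the generated metrics_pb2 module is importable), the nesting above can be
assembled in Python as:

    from opentelemetry.proto.metrics.v1 import metrics_pb2

    data = metrics_pb2.MetricsData(
        resource_metrics=[
            metrics_pb2.ResourceMetrics(
                scope_metrics=[
                    metrics_pb2.ScopeMetrics(
                        # illustrative metric name
                        metrics=[metrics_pb2.Metric(name="http.server.request.count")]
                    )
                ]
            )
        ]
    )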
""" DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_METRICS_FIELD_NUMBER: builtins.int @property def resource_metrics(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ResourceMetrics]: """An array of ResourceMetrics. For data coming from a single resource this array will typically contain one element. Intermediary nodes that receive data from multiple origins typically batch the data before forwarding further and in that case this array will contain multiple elements. """ def __init__( self, *, resource_metrics: collections.abc.Iterable[global___ResourceMetrics] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["resource_metrics", b"resource_metrics"]) -> None: ... global___MetricsData = MetricsData @typing_extensions.final class ResourceMetrics(google.protobuf.message.Message): """A collection of ScopeMetrics from a Resource.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_FIELD_NUMBER: builtins.int SCOPE_METRICS_FIELD_NUMBER: builtins.int SCHEMA_URL_FIELD_NUMBER: builtins.int @property def resource(self) -> opentelemetry.proto.resource.v1.resource_pb2.Resource: """The resource for the metrics in this message. If this field is not set then no resource info is known. """ @property def scope_metrics(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ScopeMetrics]: """A list of metrics that originate from a resource.""" schema_url: builtins.str """The Schema URL, if known. This is the identifier of the Schema that the resource data is recorded in. Notably, the last part of the URL path is the version number of the schema: http[s]://server[:port]/path/. To learn more about Schema URL see https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This schema_url applies to the data in the "resource" field. It does not apply to the data in the "scope_metrics" field which have their own schema_url field. """ def __init__( self, *, resource: opentelemetry.proto.resource.v1.resource_pb2.Resource | None = ..., scope_metrics: collections.abc.Iterable[global___ScopeMetrics] | None = ..., schema_url: builtins.str = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["resource", b"resource"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["resource", b"resource", "schema_url", b"schema_url", "scope_metrics", b"scope_metrics"]) -> None: ... global___ResourceMetrics = ResourceMetrics @typing_extensions.final class ScopeMetrics(google.protobuf.message.Message): """A collection of Metrics produced by an Scope.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor SCOPE_FIELD_NUMBER: builtins.int METRICS_FIELD_NUMBER: builtins.int SCHEMA_URL_FIELD_NUMBER: builtins.int @property def scope(self) -> opentelemetry.proto.common.v1.common_pb2.InstrumentationScope: """The instrumentation scope information for the metrics in this message. Semantically when InstrumentationScope isn't set, it is equivalent with an empty instrumentation scope name (unknown). """ @property def metrics(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Metric]: """A list of metrics that originate from an instrumentation library.""" schema_url: builtins.str """The Schema URL, if known. This is the identifier of the Schema that the metric data is recorded in. Notably, the last part of the URL path is the version number of the schema: http[s]://server[:port]/path/. 
To learn more about Schema URL see https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This schema_url applies to all metrics in the "metrics" field. """ def __init__( self, *, scope: opentelemetry.proto.common.v1.common_pb2.InstrumentationScope | None = ..., metrics: collections.abc.Iterable[global___Metric] | None = ..., schema_url: builtins.str = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["scope", b"scope"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["metrics", b"metrics", "schema_url", b"schema_url", "scope", b"scope"]) -> None: ... global___ScopeMetrics = ScopeMetrics @typing_extensions.final class Metric(google.protobuf.message.Message): """Defines a Metric which has one or more timeseries. The following is a brief summary of the Metric data model. For more details, see: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md The data model and relation between entities is shown in the diagram below. Here, "DataPoint" is the term used to refer to any one of the specific data point value types, and "points" is the term used to refer to any one of the lists of points contained in the Metric. - Metric is composed of a metadata and data. - Metadata part contains a name, description, unit. - Data is one of the possible types (Sum, Gauge, Histogram, Summary). - DataPoint contains timestamps, attributes, and one of the possible value type fields. Metric +------------+ |name | |description | |unit | +------------------------------------+ |data |---> |Gauge, Sum, Histogram, Summary, ... | +------------+ +------------------------------------+ Data [One of Gauge, Sum, Histogram, Summary, ...] +-----------+ |... | // Metadata about the Data. |points |--+ +-----------+ | | +---------------------------+ | |DataPoint 1 | v |+------+------+ +------+ | +-----+ ||label |label |...|label | | | 1 |-->||value1|value2|...|valueN| | +-----+ |+------+------+ +------+ | | . | |+-----+ | | . | ||value| | | . | |+-----+ | | . | +---------------------------+ | . | . | . | . | . | . | . | +---------------------------+ | . | |DataPoint M | +-----+ |+------+------+ +------+ | | M |-->||label |label |...|label | | +-----+ ||value1|value2|...|valueN| | |+------+------+ +------+ | |+-----+ | ||value| | |+-----+ | +---------------------------+ Each distinct type of DataPoint represents the output of a specific aggregation function, the result of applying the DataPoint's associated function to one or more measurements. All DataPoint types have three common fields: - Attributes includes key-value pairs associated with the data point - TimeUnixNano is required, set to the end time of the aggregation - StartTimeUnixNano is optional, but strongly encouraged for DataPoints having an AggregationTemporality field, as discussed below. Both TimeUnixNano and StartTimeUnixNano values are expressed as UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. # TimeUnixNano This field is required, having consistent interpretation across DataPoint types. TimeUnixNano is the moment corresponding to when the data point's aggregate value was captured. Data points with the 0 value for TimeUnixNano SHOULD be rejected by consumers. # StartTimeUnixNano StartTimeUnixNano in general allows detecting when a sequence of observations is unbroken.
This field indicates to consumers the start time for points with cumulative and delta AggregationTemporality, and it should be included whenever possible to support correct rate calculation. Although it may be omitted when the start time is truly unknown, setting StartTimeUnixNano is strongly encouraged. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor NAME_FIELD_NUMBER: builtins.int DESCRIPTION_FIELD_NUMBER: builtins.int UNIT_FIELD_NUMBER: builtins.int GAUGE_FIELD_NUMBER: builtins.int SUM_FIELD_NUMBER: builtins.int HISTOGRAM_FIELD_NUMBER: builtins.int EXPONENTIAL_HISTOGRAM_FIELD_NUMBER: builtins.int SUMMARY_FIELD_NUMBER: builtins.int METADATA_FIELD_NUMBER: builtins.int name: builtins.str """name of the metric.""" description: builtins.str """description of the metric, which can be used in documentation.""" unit: builtins.str """unit in which the metric value is reported. Follows the format described by https://unitsofmeasure.org/ucum.html. """ @property def gauge(self) -> global___Gauge: ... @property def sum(self) -> global___Sum: ... @property def histogram(self) -> global___Histogram: ... @property def exponential_histogram(self) -> global___ExponentialHistogram: ... @property def summary(self) -> global___Summary: ... @property def metadata(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """Additional metadata attributes that describe the metric. [Optional]. Attributes are non-identifying. Consumers SHOULD NOT need to be aware of these attributes. These attributes MAY be used to encode information allowing for lossless roundtrip translation to / from another data model. Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ def __init__( self, *, name: builtins.str = ..., description: builtins.str = ..., unit: builtins.str = ..., gauge: global___Gauge | None = ..., sum: global___Sum | None = ..., histogram: global___Histogram | None = ..., exponential_histogram: global___ExponentialHistogram | None = ..., summary: global___Summary | None = ..., metadata: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["data", b"data", "exponential_histogram", b"exponential_histogram", "gauge", b"gauge", "histogram", b"histogram", "sum", b"sum", "summary", b"summary"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["data", b"data", "description", b"description", "exponential_histogram", b"exponential_histogram", "gauge", b"gauge", "histogram", b"histogram", "metadata", b"metadata", "name", b"name", "sum", b"sum", "summary", b"summary", "unit", b"unit"]) -> None: ... def WhichOneof(self, oneof_group: typing_extensions.Literal["data", b"data"]) -> typing_extensions.Literal["gauge", "sum", "histogram", "exponential_histogram", "summary"] | None: ... global___Metric = Metric @typing_extensions.final class Gauge(google.protobuf.message.Message): """Gauge represents the type of a scalar metric that always exports the "current value" for every data point. It should be used for an "unknown" aggregation. A Gauge does not support different aggregation temporalities. Given the aggregation is unknown, points cannot be combined using the same aggregation, regardless of aggregation temporalities. Therefore, AggregationTemporality is not included. Consequently, this also means "StartTimeUnixNano" is ignored for all data points. 
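For illustration (a sketch, assuming the generated metrics_pb2 module is
importable), a Gauge metric with a single data point can be built as:

    metric = metrics_pb2.Metric(
        name="system.memory.usage",  # illustrative name
        unit="By",
        gauge=metrics_pb2.Gauge(
            data_points=[
                metrics_pb2.NumberDataPoint(
                    time_unix_nano=1_700_000_000_000_000_000,
                    as_int=536_870_912,
                )
            ]
        ),
    )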
""" DESCRIPTOR: google.protobuf.descriptor.Descriptor DATA_POINTS_FIELD_NUMBER: builtins.int @property def data_points(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___NumberDataPoint]: ... def __init__( self, *, data_points: collections.abc.Iterable[global___NumberDataPoint] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["data_points", b"data_points"]) -> None: ... global___Gauge = Gauge @typing_extensions.final class Sum(google.protobuf.message.Message): """Sum represents the type of a scalar metric that is calculated as a sum of all reported measurements over a time interval. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor DATA_POINTS_FIELD_NUMBER: builtins.int AGGREGATION_TEMPORALITY_FIELD_NUMBER: builtins.int IS_MONOTONIC_FIELD_NUMBER: builtins.int @property def data_points(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___NumberDataPoint]: ... aggregation_temporality: global___AggregationTemporality.ValueType """aggregation_temporality describes if the aggregator reports delta changes since last report time, or cumulative changes since a fixed start time. """ is_monotonic: builtins.bool """If "true" means that the sum is monotonic.""" def __init__( self, *, data_points: collections.abc.Iterable[global___NumberDataPoint] | None = ..., aggregation_temporality: global___AggregationTemporality.ValueType = ..., is_monotonic: builtins.bool = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["aggregation_temporality", b"aggregation_temporality", "data_points", b"data_points", "is_monotonic", b"is_monotonic"]) -> None: ... global___Sum = Sum @typing_extensions.final class Histogram(google.protobuf.message.Message): """Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor DATA_POINTS_FIELD_NUMBER: builtins.int AGGREGATION_TEMPORALITY_FIELD_NUMBER: builtins.int @property def data_points(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___HistogramDataPoint]: ... aggregation_temporality: global___AggregationTemporality.ValueType """aggregation_temporality describes if the aggregator reports delta changes since last report time, or cumulative changes since a fixed start time. """ def __init__( self, *, data_points: collections.abc.Iterable[global___HistogramDataPoint] | None = ..., aggregation_temporality: global___AggregationTemporality.ValueType = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["aggregation_temporality", b"aggregation_temporality", "data_points", b"data_points"]) -> None: ... global___Histogram = Histogram @typing_extensions.final class ExponentialHistogram(google.protobuf.message.Message): """ExponentialHistogram represents the type of a metric that is calculated by aggregating as a ExponentialHistogram of all reported double measurements over a time interval. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor DATA_POINTS_FIELD_NUMBER: builtins.int AGGREGATION_TEMPORALITY_FIELD_NUMBER: builtins.int @property def data_points(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ExponentialHistogramDataPoint]: ... 
aggregation_temporality: global___AggregationTemporality.ValueType """aggregation_temporality describes if the aggregator reports delta changes since last report time, or cumulative changes since a fixed start time. """ def __init__( self, *, data_points: collections.abc.Iterable[global___ExponentialHistogramDataPoint] | None = ..., aggregation_temporality: global___AggregationTemporality.ValueType = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["aggregation_temporality", b"aggregation_temporality", "data_points", b"data_points"]) -> None: ... global___ExponentialHistogram = ExponentialHistogram @typing_extensions.final class Summary(google.protobuf.message.Message): """Summary metric data are used to convey quantile summaries, a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) and OpenMetrics (see: https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) data type. These data points cannot always be merged in a meaningful way. While they can be useful in some applications, histogram data points are recommended for new applications. Summary metrics do not have an aggregation temporality field. This is because the count and sum fields of a SummaryDataPoint are assumed to be cumulative values. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor DATA_POINTS_FIELD_NUMBER: builtins.int @property def data_points(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___SummaryDataPoint]: ... def __init__( self, *, data_points: collections.abc.Iterable[global___SummaryDataPoint] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["data_points", b"data_points"]) -> None: ... global___Summary = Summary @typing_extensions.final class NumberDataPoint(google.protobuf.message.Message): """NumberDataPoint is a single data point in a timeseries that describes the time-varying scalar value of a metric. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor ATTRIBUTES_FIELD_NUMBER: builtins.int START_TIME_UNIX_NANO_FIELD_NUMBER: builtins.int TIME_UNIX_NANO_FIELD_NUMBER: builtins.int AS_DOUBLE_FIELD_NUMBER: builtins.int AS_INT_FIELD_NUMBER: builtins.int EXEMPLARS_FIELD_NUMBER: builtins.int FLAGS_FIELD_NUMBER: builtins.int @property def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """The set of key/value pairs that uniquely identify the timeseries to which this point belongs. The list may be empty (may contain 0 elements). Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ start_time_unix_nano: builtins.int """StartTimeUnixNano is optional but strongly encouraged, see the detailed comments above Metric. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. """ time_unix_nano: builtins.int """TimeUnixNano is required, see the detailed comments above Metric. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. """ as_double: builtins.float as_int: builtins.int @property def exemplars(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Exemplar]: """(Optional) List of exemplars collected from measurements that were used to form the data point """ flags: builtins.int """Flags that apply to this specific data point. See DataPointFlags for the available flags and their meaning.
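For illustration (a sketch): a point explicitly marked as having no recorded
value can be detected with the mask documented in DataPointFlags:

    stale = (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK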
""" def __init__( self, *, attributes: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., start_time_unix_nano: builtins.int = ..., time_unix_nano: builtins.int = ..., as_double: builtins.float = ..., as_int: builtins.int = ..., exemplars: collections.abc.Iterable[global___Exemplar] | None = ..., flags: builtins.int = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["as_double", b"as_double", "as_int", b"as_int", "value", b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["as_double", b"as_double", "as_int", b"as_int", "attributes", b"attributes", "exemplars", b"exemplars", "flags", b"flags", "start_time_unix_nano", b"start_time_unix_nano", "time_unix_nano", b"time_unix_nano", "value", b"value"]) -> None: ... def WhichOneof(self, oneof_group: typing_extensions.Literal["value", b"value"]) -> typing_extensions.Literal["as_double", "as_int"] | None: ... global___NumberDataPoint = NumberDataPoint @typing_extensions.final class HistogramDataPoint(google.protobuf.message.Message): """HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram. A Histogram contains summary statistics for a population of values, it may optionally contain the distribution of those values across a set of buckets. If the histogram contains the distribution of values, then both "explicit_bounds" and "bucket counts" fields must be defined. If the histogram does not contain the distribution of values, then both "explicit_bounds" and "bucket_counts" must be omitted and only "count" and "sum" are known. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor ATTRIBUTES_FIELD_NUMBER: builtins.int START_TIME_UNIX_NANO_FIELD_NUMBER: builtins.int TIME_UNIX_NANO_FIELD_NUMBER: builtins.int COUNT_FIELD_NUMBER: builtins.int SUM_FIELD_NUMBER: builtins.int BUCKET_COUNTS_FIELD_NUMBER: builtins.int EXPLICIT_BOUNDS_FIELD_NUMBER: builtins.int EXEMPLARS_FIELD_NUMBER: builtins.int FLAGS_FIELD_NUMBER: builtins.int MIN_FIELD_NUMBER: builtins.int MAX_FIELD_NUMBER: builtins.int @property def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """The set of key/value pairs that uniquely identify the timeseries from where this point belongs. The list may be empty (may contain 0 elements). Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ start_time_unix_nano: builtins.int """StartTimeUnixNano is optional but strongly encouraged, see the the detailed comments above Metric. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. """ time_unix_nano: builtins.int """TimeUnixNano is required, see the detailed comments above Metric. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. """ count: builtins.int """count is the number of values in the population. Must be non-negative. This value must be equal to the sum of the "count" fields in buckets if a histogram is provided. """ sum: builtins.float """sum of the values in the population. If count is zero then this field must be zero. Note: Sum should only be filled out when measuring non-negative discrete events, and is assumed to be monotonic over the values of these events. Negative events *can* be recorded, but sum should not be filled out when doing so. 
This is specifically to enforce compatibility w/ OpenMetrics, see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram """ @property def bucket_counts(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """bucket_counts is an optional field that contains the count values of the histogram for each bucket. The sum of the bucket_counts must equal the value in the count field. The number of elements in the bucket_counts array must be one greater than the number of elements in the explicit_bounds array. The exception to this rule is when the length of bucket_counts is 0, in which case the length of explicit_bounds must also be 0. """ @property def explicit_bounds(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.float]: """explicit_bounds specifies buckets with explicitly defined bounds for values. The boundaries for bucket at index i are: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) The values in the explicit_bounds array must be strictly increasing. Histogram buckets are inclusive of their upper boundary, except the last bucket where the boundary is at infinity. This format is intentionally compatible with the OpenMetrics histogram definition. If bucket_counts length is 0 then explicit_bounds length must also be 0, otherwise the data point is invalid. """ @property def exemplars(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Exemplar]: """(Optional) List of exemplars collected from measurements that were used to form the data point """ flags: builtins.int """Flags that apply to this specific data point. See DataPointFlags for the available flags and their meaning. """ min: builtins.float """min is the minimum value over (start_time, end_time].""" max: builtins.float """max is the maximum value over (start_time, end_time].""" def __init__( self, *, attributes: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., start_time_unix_nano: builtins.int = ..., time_unix_nano: builtins.int = ..., count: builtins.int = ..., sum: builtins.float | None = ..., bucket_counts: collections.abc.Iterable[builtins.int] | None = ..., explicit_bounds: collections.abc.Iterable[builtins.float] | None = ..., exemplars: collections.abc.Iterable[global___Exemplar] | None = ..., flags: builtins.int = ..., min: builtins.float | None = ..., max: builtins.float | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["_max", b"_max", "_min", b"_min", "_sum", b"_sum", "max", b"max", "min", b"min", "sum", b"sum"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["_max", b"_max", "_min", b"_min", "_sum", b"_sum", "attributes", b"attributes", "bucket_counts", b"bucket_counts", "count", b"count", "exemplars", b"exemplars", "explicit_bounds", b"explicit_bounds", "flags", b"flags", "max", b"max", "min", b"min", "start_time_unix_nano", b"start_time_unix_nano", "sum", b"sum", "time_unix_nano", b"time_unix_nano"]) -> None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_max", b"_max"]) -> typing_extensions.Literal["max"] | None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_min", b"_min"]) -> typing_extensions.Literal["min"] | None: ...
@typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_sum", b"_sum"]) -> typing_extensions.Literal["sum"] | None: ... global___HistogramDataPoint = HistogramDataPoint @typing_extensions.final class ExponentialHistogramDataPoint(google.protobuf.message.Message): """ExponentialHistogramDataPoint is a single data point in a timeseries that describes the time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains summary statistics for a population of values; it may optionally contain the distribution of those values across a set of buckets. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor @typing_extensions.final class Buckets(google.protobuf.message.Message): """Buckets are a set of bucket counts, encoded in a contiguous array of counts. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor OFFSET_FIELD_NUMBER: builtins.int BUCKET_COUNTS_FIELD_NUMBER: builtins.int offset: builtins.int """Offset is the bucket index of the first entry in the bucket_counts array. Note: This uses a varint encoding as a simple form of compression. """ @property def bucket_counts(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """bucket_counts is an array of count values, where bucket_counts[i] carries the count of the bucket at index (offset+i). bucket_counts[i] is the count of values greater than base^(offset+i) and less than or equal to base^(offset+i+1). Note: By contrast, the explicit HistogramDataPoint uses fixed64. This field is expected to have many buckets, especially zeros, so uint64 has been selected to ensure varint encoding. """ def __init__( self, *, offset: builtins.int = ..., bucket_counts: collections.abc.Iterable[builtins.int] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["bucket_counts", b"bucket_counts", "offset", b"offset"]) -> None: ... ATTRIBUTES_FIELD_NUMBER: builtins.int START_TIME_UNIX_NANO_FIELD_NUMBER: builtins.int TIME_UNIX_NANO_FIELD_NUMBER: builtins.int COUNT_FIELD_NUMBER: builtins.int SUM_FIELD_NUMBER: builtins.int SCALE_FIELD_NUMBER: builtins.int ZERO_COUNT_FIELD_NUMBER: builtins.int POSITIVE_FIELD_NUMBER: builtins.int NEGATIVE_FIELD_NUMBER: builtins.int FLAGS_FIELD_NUMBER: builtins.int EXEMPLARS_FIELD_NUMBER: builtins.int MIN_FIELD_NUMBER: builtins.int MAX_FIELD_NUMBER: builtins.int ZERO_THRESHOLD_FIELD_NUMBER: builtins.int @property def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """The set of key/value pairs that uniquely identify the timeseries to which this point belongs. The list may be empty (may contain 0 elements). Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ start_time_unix_nano: builtins.int """StartTimeUnixNano is optional but strongly encouraged, see the detailed comments above Metric. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. """ time_unix_nano: builtins.int """TimeUnixNano is required, see the detailed comments above Metric. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. """ count: builtins.int """count is the number of values in the population. Must be non-negative. This value must be equal to the sum of the "bucket_counts" values in the positive and negative Buckets plus the "zero_count" field. """ sum: builtins.float """sum of the values in the population.
If count is zero then this field must be zero. Note: Sum should only be filled out when measuring non-negative discrete events, and is assumed to be monotonic over the values of these events. Negative events *can* be recorded, but sum should not be filled out when doing so. This is specifically to enforce compatibility w/ OpenMetrics, see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram """ scale: builtins.int """scale describes the resolution of the histogram. Boundaries are located at powers of the base, where: base = (2^(2^-scale)) The histogram bucket identified by `index`, a signed integer, contains values that are greater than (base^index) and less than or equal to (base^(index+1)). The positive and negative ranges of the histogram are expressed separately. Negative values are mapped by their absolute value into the negative range using the same scale as the positive range. scale is not restricted by the protocol, as the permissible values depend on the range of the data. """ zero_count: builtins.int """zero_count is the count of values that are either exactly zero or within the region considered zero by the instrumentation at the tolerated degree of precision. This bucket stores values that cannot be expressed using the standard exponential formula as well as values that have been rounded to zero. Implementations MAY consider the zero bucket to have probability mass equal to (zero_count / count). """ @property def positive(self) -> global___ExponentialHistogramDataPoint.Buckets: """positive carries the positive range of exponential bucket counts.""" @property def negative(self) -> global___ExponentialHistogramDataPoint.Buckets: """negative carries the negative range of exponential bucket counts.""" flags: builtins.int """Flags that apply to this specific data point. See DataPointFlags for the available flags and their meaning. """ @property def exemplars(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Exemplar]: """(Optional) List of exemplars collected from measurements that were used to form the data point """ min: builtins.float """min is the minimum value over (start_time, end_time].""" max: builtins.float """max is the maximum value over (start_time, end_time].""" zero_threshold: builtins.float """ZeroThreshold may optionally be set to convey the width of the zero region, defined as the closed interval [-ZeroThreshold, ZeroThreshold]. When ZeroThreshold is 0, the zero count bucket stores values that cannot be expressed using the standard exponential formula as well as values that have been rounded to zero. """ def __init__( self, *, attributes: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., start_time_unix_nano: builtins.int = ..., time_unix_nano: builtins.int = ..., count: builtins.int = ..., sum: builtins.float | None = ..., scale: builtins.int = ..., zero_count: builtins.int = ..., positive: global___ExponentialHistogramDataPoint.Buckets | None = ..., negative: global___ExponentialHistogramDataPoint.Buckets | None = ..., flags: builtins.int = ..., exemplars: collections.abc.Iterable[global___Exemplar] | None = ..., min: builtins.float | None = ..., max: builtins.float | None = ..., zero_threshold: builtins.float = ..., ) -> None: ...
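    # Worked example of the scale/base relationship documented above
    # (comments only; illustrative, not part of the generated stub):
    #
    #   scale = 3
    #   base = 2.0 ** (2.0 ** -scale)   # = 2**(1/8) ~= 1.0905
    #   index = 8                       # bucket 8 covers (base**8, base**9]
    #   lower = base ** index           # = 2.0 exactly, since base**8 == 2**1
    #   upper = base ** (index + 1)     # ~= 2.1810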
def HasField(self, field_name: typing_extensions.Literal["_max", b"_max", "_min", b"_min", "_sum", b"_sum", "max", b"max", "min", b"min", "negative", b"negative", "positive", b"positive", "sum", b"sum"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["_max", b"_max", "_min", b"_min", "_sum", b"_sum", "attributes", b"attributes", "count", b"count", "exemplars", b"exemplars", "flags", b"flags", "max", b"max", "min", b"min", "negative", b"negative", "positive", b"positive", "scale", b"scale", "start_time_unix_nano", b"start_time_unix_nano", "sum", b"sum", "time_unix_nano", b"time_unix_nano", "zero_count", b"zero_count", "zero_threshold", b"zero_threshold"]) -> None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_max", b"_max"]) -> typing_extensions.Literal["max"] | None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_min", b"_min"]) -> typing_extensions.Literal["min"] | None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_sum", b"_sum"]) -> typing_extensions.Literal["sum"] | None: ... global___ExponentialHistogramDataPoint = ExponentialHistogramDataPoint @typing_extensions.final class SummaryDataPoint(google.protobuf.message.Message): """SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary metric. The count and sum fields represent cumulative values. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor @typing_extensions.final class ValueAtQuantile(google.protobuf.message.Message): """Represents the value at a given quantile of a distribution. To record Min and Max values, the following conventions are used: - The 1.0 quantile is equivalent to the maximum value observed. - The 0.0 quantile is equivalent to the minimum value observed. See the following issue for more context: https://github.com/open-telemetry/opentelemetry-proto/issues/125 """ DESCRIPTOR: google.protobuf.descriptor.Descriptor QUANTILE_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int quantile: builtins.float """The quantile of a distribution. Must be in the interval [0.0, 1.0]. """ value: builtins.float """The value at the given quantile of a distribution. Quantile values must NOT be negative. """ def __init__( self, *, quantile: builtins.float = ..., value: builtins.float = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["quantile", b"quantile", "value", b"value"]) -> None: ... ATTRIBUTES_FIELD_NUMBER: builtins.int START_TIME_UNIX_NANO_FIELD_NUMBER: builtins.int TIME_UNIX_NANO_FIELD_NUMBER: builtins.int COUNT_FIELD_NUMBER: builtins.int SUM_FIELD_NUMBER: builtins.int QUANTILE_VALUES_FIELD_NUMBER: builtins.int FLAGS_FIELD_NUMBER: builtins.int @property def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """The set of key/value pairs that uniquely identify the timeseries to which this point belongs. The list may be empty (may contain 0 elements). Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ start_time_unix_nano: builtins.int """StartTimeUnixNano is optional but strongly encouraged, see the detailed comments above Metric. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. """ time_unix_nano: builtins.int """TimeUnixNano is required, see the detailed comments above Metric.
Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. """ count: builtins.int """count is the number of values in the population. Must be non-negative.""" sum: builtins.float """sum of the values in the population. If count is zero then this field must be zero. Note: Sum should only be filled out when measuring non-negative discrete events, and is assumed to be monotonic over the values of these events. Negative events *can* be recorded, but sum should not be filled out when doing so. This is specifically to enforce compatibility w/ OpenMetrics, see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary """ @property def quantile_values(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___SummaryDataPoint.ValueAtQuantile]: """(Optional) list of values at different quantiles of the distribution calculated from the current snapshot. The quantiles must be strictly increasing. """ flags: builtins.int """Flags that apply to this specific data point. See DataPointFlags for the available flags and their meaning. """ def __init__( self, *, attributes: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., start_time_unix_nano: builtins.int = ..., time_unix_nano: builtins.int = ..., count: builtins.int = ..., sum: builtins.float = ..., quantile_values: collections.abc.Iterable[global___SummaryDataPoint.ValueAtQuantile] | None = ..., flags: builtins.int = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["attributes", b"attributes", "count", b"count", "flags", b"flags", "quantile_values", b"quantile_values", "start_time_unix_nano", b"start_time_unix_nano", "sum", b"sum", "time_unix_nano", b"time_unix_nano"]) -> None: ... global___SummaryDataPoint = SummaryDataPoint @typing_extensions.final class Exemplar(google.protobuf.message.Message): """A representation of an exemplar, which is a sample input measurement. Exemplars also hold information about the environment when the measurement was recorded, for example the span and trace ID of the active span when the exemplar was recorded. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor FILTERED_ATTRIBUTES_FIELD_NUMBER: builtins.int TIME_UNIX_NANO_FIELD_NUMBER: builtins.int AS_DOUBLE_FIELD_NUMBER: builtins.int AS_INT_FIELD_NUMBER: builtins.int SPAN_ID_FIELD_NUMBER: builtins.int TRACE_ID_FIELD_NUMBER: builtins.int @property def filtered_attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """The set of key/value pairs that were filtered out by the aggregator, but recorded alongside the original measurement. Only key/value pairs that were filtered out by the aggregator should be included """ time_unix_nano: builtins.int """time_unix_nano is the exact time when this exemplar was recorded Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. """ as_double: builtins.float as_int: builtins.int span_id: builtins.bytes """(Optional) Span ID of the exemplar trace. span_id may be missing if the measurement is not recorded inside a trace or if the trace is not sampled. """ trace_id: builtins.bytes """(Optional) Trace ID of the exemplar trace. trace_id may be missing if the measurement is not recorded inside a trace or if the trace is not sampled. 
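For illustration (a sketch; the IDs below are illustrative): an exemplar
recorded inside a sampled trace could be populated as:

    exemplar = Exemplar(
        time_unix_nano=1_700_000_000_000_000_000,
        as_double=0.25,
        trace_id=bytes.fromhex("5b8efff798038103d269b633813fc60c"),
        span_id=bytes.fromhex("eee19b7ec3c1b174"),
    )
    assert exemplar.WhichOneof("value") == "as_double"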
""" def __init__( self, *, filtered_attributes: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., time_unix_nano: builtins.int = ..., as_double: builtins.float = ..., as_int: builtins.int = ..., span_id: builtins.bytes = ..., trace_id: builtins.bytes = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["as_double", b"as_double", "as_int", b"as_int", "value", b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["as_double", b"as_double", "as_int", b"as_int", "filtered_attributes", b"filtered_attributes", "span_id", b"span_id", "time_unix_nano", b"time_unix_nano", "trace_id", b"trace_id", "value", b"value"]) -> None: ... def WhichOneof(self, oneof_group: typing_extensions.Literal["value", b"value"]) -> typing_extensions.Literal["as_double", "as_int"] | None: ... global___Exemplar = Exemplar python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/profiles/000077500000000000000000000000001511654350100305275ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/profiles/v1development/000077500000000000000000000000001511654350100333205ustar00rootroot00000000000000profiles_pb2.py000066400000000000000000000166641511654350100362160ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/profiles/v1development# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: opentelemetry/proto/profiles/v1development/profiles.proto # Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from opentelemetry.proto.common.v1 import common_pb2 as opentelemetry_dot_proto_dot_common_dot_v1_dot_common__pb2 from opentelemetry.proto.resource.v1 import resource_pb2 as opentelemetry_dot_proto_dot_resource_dot_v1_dot_resource__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n9opentelemetry/proto/profiles/v1development/profiles.proto\x12*opentelemetry.proto.profiles.v1development\x1a*opentelemetry/proto/common/v1/common.proto\x1a.opentelemetry/proto/resource/v1/resource.proto\"\xee\x03\n\x12ProfilesDictionary\x12J\n\rmapping_table\x18\x01 \x03(\x0b\x32\x33.opentelemetry.proto.profiles.v1development.Mapping\x12L\n\x0elocation_table\x18\x02 \x03(\x0b\x32\x34.opentelemetry.proto.profiles.v1development.Location\x12L\n\x0e\x66unction_table\x18\x03 \x03(\x0b\x32\x34.opentelemetry.proto.profiles.v1development.Function\x12\x44\n\nlink_table\x18\x04 \x03(\x0b\x32\x30.opentelemetry.proto.profiles.v1development.Link\x12\x14\n\x0cstring_table\x18\x05 \x03(\t\x12@\n\x0f\x61ttribute_table\x18\x06 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12R\n\x0f\x61ttribute_units\x18\x07 \x03(\x0b\x32\x39.opentelemetry.proto.profiles.v1development.AttributeUnit\"\xbb\x01\n\x0cProfilesData\x12W\n\x11resource_profiles\x18\x01 \x03(\x0b\x32<.opentelemetry.proto.profiles.v1development.ResourceProfiles\x12R\n\ndictionary\x18\x02 \x01(\x0b\x32>.opentelemetry.proto.profiles.v1development.ProfilesDictionary\"\xbe\x01\n\x10ResourceProfiles\x12;\n\x08resource\x18\x01 \x01(\x0b\x32).opentelemetry.proto.resource.v1.Resource\x12Q\n\x0escope_profiles\x18\x02 
\x03(\x0b\x32\x39.opentelemetry.proto.profiles.v1development.ScopeProfiles\x12\x12\n\nschema_url\x18\x03 \x01(\tJ\x06\x08\xe8\x07\x10\xe9\x07\"\xae\x01\n\rScopeProfiles\x12\x42\n\x05scope\x18\x01 \x01(\x0b\x32\x33.opentelemetry.proto.common.v1.InstrumentationScope\x12\x45\n\x08profiles\x18\x02 \x03(\x0b\x32\x33.opentelemetry.proto.profiles.v1development.Profile\x12\x12\n\nschema_url\x18\x03 \x01(\t\"\x86\x04\n\x07Profile\x12J\n\x0bsample_type\x18\x01 \x03(\x0b\x32\x35.opentelemetry.proto.profiles.v1development.ValueType\x12\x42\n\x06sample\x18\x02 \x03(\x0b\x32\x32.opentelemetry.proto.profiles.v1development.Sample\x12\x18\n\x10location_indices\x18\x03 \x03(\x05\x12\x12\n\ntime_nanos\x18\x04 \x01(\x03\x12\x16\n\x0e\x64uration_nanos\x18\x05 \x01(\x03\x12J\n\x0bperiod_type\x18\x06 \x01(\x0b\x32\x35.opentelemetry.proto.profiles.v1development.ValueType\x12\x0e\n\x06period\x18\x07 \x01(\x03\x12\x1a\n\x12\x63omment_strindices\x18\x08 \x03(\x05\x12!\n\x19\x64\x65\x66\x61ult_sample_type_index\x18\t \x01(\x05\x12\x12\n\nprofile_id\x18\n \x01(\x0c\x12 \n\x18\x64ropped_attributes_count\x18\x0b \x01(\r\x12\x1f\n\x17original_payload_format\x18\x0c \x01(\t\x12\x18\n\x10original_payload\x18\r \x01(\x0c\x12\x19\n\x11\x61ttribute_indices\x18\x0e \x03(\x05\"F\n\rAttributeUnit\x12\x1e\n\x16\x61ttribute_key_strindex\x18\x01 \x01(\x05\x12\x15\n\runit_strindex\x18\x02 \x01(\x05\")\n\x04Link\x12\x10\n\x08trace_id\x18\x01 \x01(\x0c\x12\x0f\n\x07span_id\x18\x02 \x01(\x0c\"\x9e\x01\n\tValueType\x12\x15\n\rtype_strindex\x18\x01 \x01(\x05\x12\x15\n\runit_strindex\x18\x02 \x01(\x05\x12\x63\n\x17\x61ggregation_temporality\x18\x03 \x01(\x0e\x32\x42.opentelemetry.proto.profiles.v1development.AggregationTemporality\"\xb1\x01\n\x06Sample\x12\x1d\n\x15locations_start_index\x18\x01 \x01(\x05\x12\x18\n\x10locations_length\x18\x02 \x01(\x05\x12\r\n\x05value\x18\x03 \x03(\x03\x12\x19\n\x11\x61ttribute_indices\x18\x04 \x03(\x05\x12\x17\n\nlink_index\x18\x05 \x01(\x05H\x00\x88\x01\x01\x12\x1c\n\x14timestamps_unix_nano\x18\x06 \x03(\x04\x42\r\n\x0b_link_index\"\xe3\x01\n\x07Mapping\x12\x14\n\x0cmemory_start\x18\x01 \x01(\x04\x12\x14\n\x0cmemory_limit\x18\x02 \x01(\x04\x12\x13\n\x0b\x66ile_offset\x18\x03 \x01(\x04\x12\x19\n\x11\x66ilename_strindex\x18\x04 \x01(\x05\x12\x19\n\x11\x61ttribute_indices\x18\x05 \x03(\x05\x12\x15\n\rhas_functions\x18\x06 \x01(\x08\x12\x15\n\rhas_filenames\x18\x07 \x01(\x08\x12\x18\n\x10has_line_numbers\x18\x08 \x01(\x08\x12\x19\n\x11has_inline_frames\x18\t \x01(\x08\"\xb7\x01\n\x08Location\x12\x1a\n\rmapping_index\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\x04\x12>\n\x04line\x18\x03 \x03(\x0b\x32\x30.opentelemetry.proto.profiles.v1development.Line\x12\x11\n\tis_folded\x18\x04 \x01(\x08\x12\x19\n\x11\x61ttribute_indices\x18\x05 \x03(\x05\x42\x10\n\x0e_mapping_index\"<\n\x04Line\x12\x16\n\x0e\x66unction_index\x18\x01 \x01(\x05\x12\x0c\n\x04line\x18\x02 \x01(\x03\x12\x0e\n\x06\x63olumn\x18\x03 \x01(\x03\"n\n\x08\x46unction\x12\x15\n\rname_strindex\x18\x01 \x01(\x05\x12\x1c\n\x14system_name_strindex\x18\x02 \x01(\x05\x12\x19\n\x11\x66ilename_strindex\x18\x03 \x01(\x05\x12\x12\n\nstart_line\x18\x04 
\x01(\x03*\x8c\x01\n\x16\x41ggregationTemporality\x12\'\n#AGGREGATION_TEMPORALITY_UNSPECIFIED\x10\x00\x12!\n\x1d\x41GGREGATION_TEMPORALITY_DELTA\x10\x01\x12&\n\"AGGREGATION_TEMPORALITY_CUMULATIVE\x10\x02\x42\xa4\x01\n-io.opentelemetry.proto.profiles.v1developmentB\rProfilesProtoP\x01Z5go.opentelemetry.io/proto/otlp/profiles/v1development\xaa\x02*OpenTelemetry.Proto.Profiles.V1Developmentb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.proto.profiles.v1development.profiles_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n-io.opentelemetry.proto.profiles.v1developmentB\rProfilesProtoP\001Z5go.opentelemetry.io/proto/otlp/profiles/v1development\252\002*OpenTelemetry.Proto.Profiles.V1Development' _globals['_AGGREGATIONTEMPORALITY']._serialized_start=2822 _globals['_AGGREGATIONTEMPORALITY']._serialized_end=2962 _globals['_PROFILESDICTIONARY']._serialized_start=198 _globals['_PROFILESDICTIONARY']._serialized_end=692 _globals['_PROFILESDATA']._serialized_start=695 _globals['_PROFILESDATA']._serialized_end=882 _globals['_RESOURCEPROFILES']._serialized_start=885 _globals['_RESOURCEPROFILES']._serialized_end=1075 _globals['_SCOPEPROFILES']._serialized_start=1078 _globals['_SCOPEPROFILES']._serialized_end=1252 _globals['_PROFILE']._serialized_start=1255 _globals['_PROFILE']._serialized_end=1773 _globals['_ATTRIBUTEUNIT']._serialized_start=1775 _globals['_ATTRIBUTEUNIT']._serialized_end=1845 _globals['_LINK']._serialized_start=1847 _globals['_LINK']._serialized_end=1888 _globals['_VALUETYPE']._serialized_start=1891 _globals['_VALUETYPE']._serialized_end=2049 _globals['_SAMPLE']._serialized_start=2052 _globals['_SAMPLE']._serialized_end=2229 _globals['_MAPPING']._serialized_start=2232 _globals['_MAPPING']._serialized_end=2459 _globals['_LOCATION']._serialized_start=2462 _globals['_LOCATION']._serialized_end=2645 _globals['_LINE']._serialized_start=2647 _globals['_LINE']._serialized_end=2707 _globals['_FUNCTION']._serialized_start=2709 _globals['_FUNCTION']._serialized_end=2819 # @@protoc_insertion_point(module_scope) profiles_pb2.pyi000066400000000000000000001242651511654350100363640ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/profiles/v1development""" @generated by mypy-protobuf. Do not edit manually! isort:skip_file Copyright 2023, OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This file includes work covered by the following copyright and permission notices: Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import builtins import collections.abc import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.internal.enum_type_wrapper import google.protobuf.message import opentelemetry.proto.common.v1.common_pb2 import opentelemetry.proto.resource.v1.resource_pb2 import sys import typing if sys.version_info >= (3, 10): import typing as typing_extensions else: import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor class _AggregationTemporality: ValueType = typing.NewType("ValueType", builtins.int) V: typing_extensions.TypeAlias = ValueType class _AggregationTemporalityEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_AggregationTemporality.ValueType], builtins.type): DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor AGGREGATION_TEMPORALITY_UNSPECIFIED: _AggregationTemporality.ValueType # 0 """UNSPECIFIED is the default AggregationTemporality, it MUST not be used.""" AGGREGATION_TEMPORALITY_DELTA: _AggregationTemporality.ValueType # 1 """* DELTA is an AggregationTemporality for a profiler which reports changes since last report time. Successive metrics contain aggregation of values from continuous and non-overlapping intervals. The values for a DELTA metric are based only on the time interval associated with one measurement cycle. There is no dependency on previous measurements like is the case for CUMULATIVE metrics. For example, consider a system measuring the number of requests that it receives and reports the sum of these requests every second as a DELTA metric: 1. The system starts receiving at time=t_0. 2. A request is received, the system measures 1 request. 3. A request is received, the system measures 1 request. 4. A request is received, the system measures 1 request. 5. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0 to t_0+1 with a value of 3. 6. A request is received, the system measures 1 request. 7. A request is received, the system measures 1 request. 8. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0+1 to t_0+2 with a value of 2. """ AGGREGATION_TEMPORALITY_CUMULATIVE: _AggregationTemporality.ValueType # 2 """* CUMULATIVE is an AggregationTemporality for a profiler which reports changes since a fixed start time. This means that current values of a CUMULATIVE metric depend on all previous measurements since the start time. Because of this, the sender is required to retain this state in some form. If this state is lost or invalidated, the CUMULATIVE metric values MUST be reset and a new fixed start time following the last reported measurement time sent MUST be used. For example, consider a system measuring the number of requests that it receives and reports the sum of these requests every second as a CUMULATIVE metric: 1. The system starts receiving at time=t_0. 2. A request is received, the system measures 1 request. 3. A request is received, the system measures 1 request. 4. A request is received, the system measures 1 request. 5. 
The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0 to t_0+1 with a value of 3. 6. A request is received, the system measures 1 request. 7. A request is received, the system measures 1 request. 8. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0 to t_0+2 with a value of 5. 9. The system experiences a fault and loses state. 10. The system recovers and resumes receiving at time=t_1. 11. A request is received, the system measures 1 request. 12. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_1 to t_1+1 with a value of 1. Note: Even though, when reporting changes since last report time, using CUMULATIVE is valid, it is not recommended. """ class AggregationTemporality(_AggregationTemporality, metaclass=_AggregationTemporalityEnumTypeWrapper): """Specifies the method of aggregating metric values, either DELTA (change since last report) or CUMULATIVE (total since a fixed start time). """ AGGREGATION_TEMPORALITY_UNSPECIFIED: AggregationTemporality.ValueType # 0 """UNSPECIFIED is the default AggregationTemporality, it MUST not be used.""" AGGREGATION_TEMPORALITY_DELTA: AggregationTemporality.ValueType # 1 """* DELTA is an AggregationTemporality for a profiler which reports changes since last report time. Successive metrics contain aggregation of values from continuous and non-overlapping intervals. The values for a DELTA metric are based only on the time interval associated with one measurement cycle. There is no dependency on previous measurements like is the case for CUMULATIVE metrics. For example, consider a system measuring the number of requests that it receives and reports the sum of these requests every second as a DELTA metric: 1. The system starts receiving at time=t_0. 2. A request is received, the system measures 1 request. 3. A request is received, the system measures 1 request. 4. A request is received, the system measures 1 request. 5. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0 to t_0+1 with a value of 3. 6. A request is received, the system measures 1 request. 7. A request is received, the system measures 1 request. 8. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0+1 to t_0+2 with a value of 2. """ AGGREGATION_TEMPORALITY_CUMULATIVE: AggregationTemporality.ValueType # 2 """* CUMULATIVE is an AggregationTemporality for a profiler which reports changes since a fixed start time. This means that current values of a CUMULATIVE metric depend on all previous measurements since the start time. Because of this, the sender is required to retain this state in some form. If this state is lost or invalidated, the CUMULATIVE metric values MUST be reset and a new fixed start time following the last reported measurement time sent MUST be used. For example, consider a system measuring the number of requests that it receives and reports the sum of these requests every second as a CUMULATIVE metric: 1. The system starts receiving at time=t_0. 2. A request is received, the system measures 1 request. 3. A request is received, the system measures 1 request. 4. A request is received, the system measures 1 request. 5. The 1 second collection cycle ends. 
A metric is exported for the number of requests received over the interval of time t_0 to t_0+1 with a value of 3. 6. A request is received, the system measures 1 request. 7. A request is received, the system measures 1 request. 8. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_0 to t_0+2 with a value of 5. 9. The system experiences a fault and loses state. 10. The system recovers and resumes receiving at time=t_1. 11. A request is received, the system measures 1 request. 12. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_1 to t_1+1 with a value of 1. Note: Even though, when reporting changes since last report time, using CUMULATIVE is valid, it is not recommended. """ global___AggregationTemporality = AggregationTemporality @typing_extensions.final class ProfilesDictionary(google.protobuf.message.Message): """ Relationships Diagram ┌──────────────────┐ LEGEND │ ProfilesData │ ─────┐ └──────────────────┘ │ ─────▶ embedded │ │ │ 1-n │ ─────▷ referenced by index ▼ ▼ ┌──────────────────┐ ┌────────────────────┐ │ ResourceProfiles │ │ ProfilesDictionary │ └──────────────────┘ └────────────────────┘ │ │ 1-n ▼ ┌──────────────────┐ │ ScopeProfiles │ └──────────────────┘ │ │ 1-1 ▼ ┌──────────────────┐ │ Profile │ └──────────────────┘ │ n-1 │ 1-n ┌───────────────────────────────────────┐ ▼ │ ▽ ┌──────────────────┐ 1-n ┌──────────────┐ ┌──────────┐ │ Sample │ ──────▷ │ KeyValue │ │ Link │ └──────────────────┘ └──────────────┘ └──────────┘ │ 1-n △ △ │ 1-n ┌─────────────────┘ │ 1-n ▽ │ │ ┌──────────────────┐ n-1 ┌──────────────┐ │ Location │ ──────▷ │ Mapping │ └──────────────────┘ └──────────────┘ │ │ 1-n ▼ ┌──────────────────┐ │ Line │ └──────────────────┘ │ │ 1-1 ▽ ┌──────────────────┐ │ Function │ └──────────────────┘ ProfilesDictionary represents the profiles data shared across the entire message being sent. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor MAPPING_TABLE_FIELD_NUMBER: builtins.int LOCATION_TABLE_FIELD_NUMBER: builtins.int FUNCTION_TABLE_FIELD_NUMBER: builtins.int LINK_TABLE_FIELD_NUMBER: builtins.int STRING_TABLE_FIELD_NUMBER: builtins.int ATTRIBUTE_TABLE_FIELD_NUMBER: builtins.int ATTRIBUTE_UNITS_FIELD_NUMBER: builtins.int @property def mapping_table(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Mapping]: """Mappings from address ranges to the image/binary/library mapped into that address range referenced by locations via Location.mapping_index. """ @property def location_table(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Location]: """Locations referenced by samples via Profile.location_indices.""" @property def function_table(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Function]: """Functions referenced by locations via Line.function_index.""" @property def link_table(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Link]: """Links referenced by samples via Sample.link_index.""" @property def string_table(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: """A common table for strings referenced by various messages. string_table[0] must always be "". 
""" @property def attribute_table(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """A common table for attributes referenced by various messages.""" @property def attribute_units(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___AttributeUnit]: """Represents a mapping between Attribute Keys and Units.""" def __init__( self, *, mapping_table: collections.abc.Iterable[global___Mapping] | None = ..., location_table: collections.abc.Iterable[global___Location] | None = ..., function_table: collections.abc.Iterable[global___Function] | None = ..., link_table: collections.abc.Iterable[global___Link] | None = ..., string_table: collections.abc.Iterable[builtins.str] | None = ..., attribute_table: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., attribute_units: collections.abc.Iterable[global___AttributeUnit] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["attribute_table", b"attribute_table", "attribute_units", b"attribute_units", "function_table", b"function_table", "link_table", b"link_table", "location_table", b"location_table", "mapping_table", b"mapping_table", "string_table", b"string_table"]) -> None: ... global___ProfilesDictionary = ProfilesDictionary @typing_extensions.final class ProfilesData(google.protobuf.message.Message): """ProfilesData represents the profiles data that can be stored in persistent storage, OR can be embedded by other protocols that transfer OTLP profiles data but do not implement the OTLP protocol. The main difference between this message and collector protocol is that in this message there will not be any "control" or "metadata" specific to OTLP protocol. When new fields are added into this message, the OTLP request MUST be updated as well. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_PROFILES_FIELD_NUMBER: builtins.int DICTIONARY_FIELD_NUMBER: builtins.int @property def resource_profiles(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ResourceProfiles]: """An array of ResourceProfiles. For data coming from an SDK profiler, this array will typically contain one element. Host-level profilers will usually create one ResourceProfile per container, as well as one additional ResourceProfile grouping all samples from non-containerized processes. Other resource groupings are possible as well and clarified via Resource.attributes and semantic conventions. """ @property def dictionary(self) -> global___ProfilesDictionary: """One instance of ProfilesDictionary""" def __init__( self, *, resource_profiles: collections.abc.Iterable[global___ResourceProfiles] | None = ..., dictionary: global___ProfilesDictionary | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["dictionary", b"dictionary"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["dictionary", b"dictionary", "resource_profiles", b"resource_profiles"]) -> None: ... 
global___ProfilesData = ProfilesData @typing_extensions.final class ResourceProfiles(google.protobuf.message.Message): """A collection of ScopeProfiles from a Resource.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_FIELD_NUMBER: builtins.int SCOPE_PROFILES_FIELD_NUMBER: builtins.int SCHEMA_URL_FIELD_NUMBER: builtins.int @property def resource(self) -> opentelemetry.proto.resource.v1.resource_pb2.Resource: """The resource for the profiles in this message. If this field is not set then no resource info is known. """ @property def scope_profiles(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ScopeProfiles]: """A list of ScopeProfiles that originate from a resource.""" schema_url: builtins.str """The Schema URL, if known. This is the identifier of the Schema that the resource data is recorded in. Notably, the last part of the URL path is the version number of the schema: http[s]://server[:port]/path/. To learn more about Schema URL see https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This schema_url applies to the data in the "resource" field. It does not apply to the data in the "scope_profiles" field which have their own schema_url field. """ def __init__( self, *, resource: opentelemetry.proto.resource.v1.resource_pb2.Resource | None = ..., scope_profiles: collections.abc.Iterable[global___ScopeProfiles] | None = ..., schema_url: builtins.str = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["resource", b"resource"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["resource", b"resource", "schema_url", b"schema_url", "scope_profiles", b"scope_profiles"]) -> None: ... global___ResourceProfiles = ResourceProfiles @typing_extensions.final class ScopeProfiles(google.protobuf.message.Message): """A collection of Profiles produced by an InstrumentationScope.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor SCOPE_FIELD_NUMBER: builtins.int PROFILES_FIELD_NUMBER: builtins.int SCHEMA_URL_FIELD_NUMBER: builtins.int @property def scope(self) -> opentelemetry.proto.common.v1.common_pb2.InstrumentationScope: """The instrumentation scope information for the profiles in this message. Semantically when InstrumentationScope isn't set, it is equivalent with an empty instrumentation scope name (unknown). """ @property def profiles(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Profile]: """A list of Profiles that originate from an instrumentation scope.""" schema_url: builtins.str """The Schema URL, if known. This is the identifier of the Schema that the profile data is recorded in. Notably, the last part of the URL path is the version number of the schema: http[s]://server[:port]/path/. To learn more about Schema URL see https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This schema_url applies to all profiles in the "profiles" field. """ def __init__( self, *, scope: opentelemetry.proto.common.v1.common_pb2.InstrumentationScope | None = ..., profiles: collections.abc.Iterable[global___Profile] | None = ..., schema_url: builtins.str = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["scope", b"scope"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["profiles", b"profiles", "schema_url", b"schema_url", "scope", b"scope"]) -> None: ... 
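# Illustrative sketch of the containment hierarchy declared above
# (Resource -> ScopeProfiles -> Profile), kept as a comment in this generated
# stub. The scope name and schema_url values below are placeholders, and
# Profile is declared later in this module.
#
#     from opentelemetry.proto.profiles.v1development import profiles_pb2
#     from opentelemetry.proto.resource.v1 import resource_pb2
#     from opentelemetry.proto.common.v1 import common_pb2
#
#     resource_profiles = profiles_pb2.ResourceProfiles(
#         resource=resource_pb2.Resource(),
#         scope_profiles=[
#             profiles_pb2.ScopeProfiles(
#                 scope=common_pb2.InstrumentationScope(name="example.profiler"),
#                 # profile_id must be a non-zero 16-byte array.
#                 profiles=[profiles_pb2.Profile(profile_id=b"\x01" * 16)],
#             )
#         ],
#         schema_url="https://example.com/schemas/1.0.0",  # placeholder
#     )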
global___ScopeProfiles = ScopeProfiles @typing_extensions.final class Profile(google.protobuf.message.Message): """Profile is a common stacktrace profile format. Measurements represented with this format should follow the following conventions: - Consumers should treat unset optional fields as if they had been set with their default value. - When possible, measurements should be stored in "unsampled" form that is most useful to humans. There should be enough information present to determine the original sampled values. - On-disk, the serialized proto must be gzip-compressed. - The profile is represented as a set of samples, where each sample references a sequence of locations, and where each location belongs to a mapping. - There is a N->1 relationship from sample.location_id entries to locations. For every sample.location_id entry there must be a unique Location with that index. - There is an optional N->1 relationship from locations to mappings. For every nonzero Location.mapping_id there must be a unique Mapping with that index. Represents a complete profile, including sample types, samples, mappings to binaries, locations, functions, string table, and additional metadata. It modifies and annotates pprof Profile with OpenTelemetry specific fields. Note that whilst fields in this message retain the name and field id from pprof in most cases for ease of understanding data migration, it is not intended that pprof:Profile and OpenTelemetry:Profile encoding be wire compatible. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor SAMPLE_TYPE_FIELD_NUMBER: builtins.int SAMPLE_FIELD_NUMBER: builtins.int LOCATION_INDICES_FIELD_NUMBER: builtins.int TIME_NANOS_FIELD_NUMBER: builtins.int DURATION_NANOS_FIELD_NUMBER: builtins.int PERIOD_TYPE_FIELD_NUMBER: builtins.int PERIOD_FIELD_NUMBER: builtins.int COMMENT_STRINDICES_FIELD_NUMBER: builtins.int DEFAULT_SAMPLE_TYPE_INDEX_FIELD_NUMBER: builtins.int PROFILE_ID_FIELD_NUMBER: builtins.int DROPPED_ATTRIBUTES_COUNT_FIELD_NUMBER: builtins.int ORIGINAL_PAYLOAD_FORMAT_FIELD_NUMBER: builtins.int ORIGINAL_PAYLOAD_FIELD_NUMBER: builtins.int ATTRIBUTE_INDICES_FIELD_NUMBER: builtins.int @property def sample_type(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ValueType]: """A description of the samples associated with each Sample.value. For a cpu profile this might be: [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] For a heap profile, this might be: [["allocations","count"], ["space","bytes"]], If one of the values represents the number of events represented by the sample, by convention it should be at index 0 and use sample_type.unit == "count". """ @property def sample(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Sample]: """The set of samples recorded in this profile.""" @property def location_indices(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """References to locations in ProfilesDictionary.location_table.""" time_nanos: builtins.int """The following fields 4-14 are informational, do not affect interpretation of results. Time of collection (UTC) represented as nanoseconds past the epoch. """ duration_nanos: builtins.int """Duration of the profile, if a duration makes sense.""" @property def period_type(self) -> global___ValueType: """The kind of events between sampled occurrences. 
e.g [ "cpu","cycles" ] or [ "heap","bytes" ] """ period: builtins.int """The number of events between sampled occurrences.""" @property def comment_strindices(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """Free-form text associated with the profile. The text is displayed as is to the user by the tools that read profiles (e.g. by pprof). This field should not be used to store any machine-readable information, it is only for human-friendly content. The profile must stay functional if this field is cleaned. Indices into ProfilesDictionary.string_table. """ default_sample_type_index: builtins.int """Index into the sample_type array to the default sample type.""" profile_id: builtins.bytes """A globally unique identifier for a profile. The ID is a 16-byte array. An ID with all zeroes is considered invalid. This field is required. """ dropped_attributes_count: builtins.int """dropped_attributes_count is the number of attributes that were discarded. Attributes can be discarded because their keys are too long or because there are too many attributes. If this value is 0, then no attributes were dropped. """ original_payload_format: builtins.str """Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present]""" original_payload: builtins.bytes """Original payload can be stored in this field. This can be useful for users who want to get the original payload. Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. If the original payload is in pprof format, it SHOULD not be included in this field. The field is optional, however if it is present then equivalent converted data should be populated in other fields of this message as far as is practicable. """ @property def attribute_indices(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """References to attributes in attribute_table. [optional] It is a collection of key/value pairs. Note, global attributes like server name can be set using the resource API. Examples of attributes: "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" "/http/server_latency": 300 "abc.com/myattribute": true "abc.com/score": 10.239 The OpenTelemetry API specification further restricts the allowed value types: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). 
""" def __init__( self, *, sample_type: collections.abc.Iterable[global___ValueType] | None = ..., sample: collections.abc.Iterable[global___Sample] | None = ..., location_indices: collections.abc.Iterable[builtins.int] | None = ..., time_nanos: builtins.int = ..., duration_nanos: builtins.int = ..., period_type: global___ValueType | None = ..., period: builtins.int = ..., comment_strindices: collections.abc.Iterable[builtins.int] | None = ..., default_sample_type_index: builtins.int = ..., profile_id: builtins.bytes = ..., dropped_attributes_count: builtins.int = ..., original_payload_format: builtins.str = ..., original_payload: builtins.bytes = ..., attribute_indices: collections.abc.Iterable[builtins.int] | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["period_type", b"period_type"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["attribute_indices", b"attribute_indices", "comment_strindices", b"comment_strindices", "default_sample_type_index", b"default_sample_type_index", "dropped_attributes_count", b"dropped_attributes_count", "duration_nanos", b"duration_nanos", "location_indices", b"location_indices", "original_payload", b"original_payload", "original_payload_format", b"original_payload_format", "period", b"period", "period_type", b"period_type", "profile_id", b"profile_id", "sample", b"sample", "sample_type", b"sample_type", "time_nanos", b"time_nanos"]) -> None: ... global___Profile = Profile @typing_extensions.final class AttributeUnit(google.protobuf.message.Message): """Represents a mapping between Attribute Keys and Units.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor ATTRIBUTE_KEY_STRINDEX_FIELD_NUMBER: builtins.int UNIT_STRINDEX_FIELD_NUMBER: builtins.int attribute_key_strindex: builtins.int """Index into string table.""" unit_strindex: builtins.int """Index into string table.""" def __init__( self, *, attribute_key_strindex: builtins.int = ..., unit_strindex: builtins.int = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["attribute_key_strindex", b"attribute_key_strindex", "unit_strindex", b"unit_strindex"]) -> None: ... global___AttributeUnit = AttributeUnit @typing_extensions.final class Link(google.protobuf.message.Message): """A pointer from a profile Sample to a trace Span. Connects a profile sample to a trace span, identified by unique trace and span IDs. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor TRACE_ID_FIELD_NUMBER: builtins.int SPAN_ID_FIELD_NUMBER: builtins.int trace_id: builtins.bytes """A unique identifier of a trace that this linked span is part of. The ID is a 16-byte array. """ span_id: builtins.bytes """A unique identifier for the linked span. The ID is an 8-byte array.""" def __init__( self, *, trace_id: builtins.bytes = ..., span_id: builtins.bytes = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["span_id", b"span_id", "trace_id", b"trace_id"]) -> None: ... 
global___Link = Link @typing_extensions.final class ValueType(google.protobuf.message.Message): """ValueType describes the type and units of a value, with an optional aggregation temporality.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor TYPE_STRINDEX_FIELD_NUMBER: builtins.int UNIT_STRINDEX_FIELD_NUMBER: builtins.int AGGREGATION_TEMPORALITY_FIELD_NUMBER: builtins.int type_strindex: builtins.int """Index into ProfilesDictionary.string_table.""" unit_strindex: builtins.int """Index into ProfilesDictionary.string_table.""" aggregation_temporality: global___AggregationTemporality.ValueType def __init__( self, *, type_strindex: builtins.int = ..., unit_strindex: builtins.int = ..., aggregation_temporality: global___AggregationTemporality.ValueType = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["aggregation_temporality", b"aggregation_temporality", "type_strindex", b"type_strindex", "unit_strindex", b"unit_strindex"]) -> None: ... global___ValueType = ValueType @typing_extensions.final class Sample(google.protobuf.message.Message): """Each Sample records values encountered in some program context. The program context is typically a stack trace, perhaps augmented with auxiliary information like the thread-id, some indicator of a higher level request being handled etc. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor LOCATIONS_START_INDEX_FIELD_NUMBER: builtins.int LOCATIONS_LENGTH_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int ATTRIBUTE_INDICES_FIELD_NUMBER: builtins.int LINK_INDEX_FIELD_NUMBER: builtins.int TIMESTAMPS_UNIX_NANO_FIELD_NUMBER: builtins.int locations_start_index: builtins.int """locations_start_index along with locations_length refers to a slice of locations in Profile.location_indices.""" locations_length: builtins.int """locations_length along with locations_start_index refers to a slice of locations in Profile.location_indices. Supersedes location_index. """ @property def value(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """The type and unit of each value are defined by the corresponding entry in Profile.sample_type. All samples must have the same number of values, the same as the length of Profile.sample_type. When aggregating multiple samples into a single sample, the result has a list of values that is the element-wise sum of the lists of the originals. """ @property def attribute_indices(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """References to attributes in ProfilesDictionary.attribute_table. [optional]""" link_index: builtins.int """Reference to link in ProfilesDictionary.link_table. [optional]""" @property def timestamps_unix_nano(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """Timestamps associated with Sample represented in nanoseconds. These timestamps are expected to fall within the Profile's time range. [optional] """ def __init__( self, *, locations_start_index: builtins.int = ..., locations_length: builtins.int = ..., value: collections.abc.Iterable[builtins.int] | None = ..., attribute_indices: collections.abc.Iterable[builtins.int] | None = ..., link_index: builtins.int | None = ..., timestamps_unix_nano: collections.abc.Iterable[builtins.int] | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["_link_index", b"_link_index", "link_index", b"link_index"]) -> builtins.bool: ... 
def ClearField(self, field_name: typing_extensions.Literal["_link_index", b"_link_index", "attribute_indices", b"attribute_indices", "link_index", b"link_index", "locations_length", b"locations_length", "locations_start_index", b"locations_start_index", "timestamps_unix_nano", b"timestamps_unix_nano", "value", b"value"]) -> None: ... def WhichOneof(self, oneof_group: typing_extensions.Literal["_link_index", b"_link_index"]) -> typing_extensions.Literal["link_index"] | None: ... global___Sample = Sample @typing_extensions.final class Mapping(google.protobuf.message.Message): """Describes the mapping of a binary in memory, including its address range, file offset, and metadata like build ID """ DESCRIPTOR: google.protobuf.descriptor.Descriptor MEMORY_START_FIELD_NUMBER: builtins.int MEMORY_LIMIT_FIELD_NUMBER: builtins.int FILE_OFFSET_FIELD_NUMBER: builtins.int FILENAME_STRINDEX_FIELD_NUMBER: builtins.int ATTRIBUTE_INDICES_FIELD_NUMBER: builtins.int HAS_FUNCTIONS_FIELD_NUMBER: builtins.int HAS_FILENAMES_FIELD_NUMBER: builtins.int HAS_LINE_NUMBERS_FIELD_NUMBER: builtins.int HAS_INLINE_FRAMES_FIELD_NUMBER: builtins.int memory_start: builtins.int """Address at which the binary (or DLL) is loaded into memory.""" memory_limit: builtins.int """The limit of the address range occupied by this mapping.""" file_offset: builtins.int """Offset in the binary that corresponds to the first mapped address.""" filename_strindex: builtins.int """The object this entry is loaded from. This can be a filename on disk for the main binary and shared libraries, or virtual abstractions like "[vdso]". Index into ProfilesDictionary.string_table. """ @property def attribute_indices(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """References to attributes in ProfilesDictionary.attribute_table. [optional]""" has_functions: builtins.bool """The following fields indicate the resolution of symbolic info.""" has_filenames: builtins.bool has_line_numbers: builtins.bool has_inline_frames: builtins.bool def __init__( self, *, memory_start: builtins.int = ..., memory_limit: builtins.int = ..., file_offset: builtins.int = ..., filename_strindex: builtins.int = ..., attribute_indices: collections.abc.Iterable[builtins.int] | None = ..., has_functions: builtins.bool = ..., has_filenames: builtins.bool = ..., has_line_numbers: builtins.bool = ..., has_inline_frames: builtins.bool = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["attribute_indices", b"attribute_indices", "file_offset", b"file_offset", "filename_strindex", b"filename_strindex", "has_filenames", b"has_filenames", "has_functions", b"has_functions", "has_inline_frames", b"has_inline_frames", "has_line_numbers", b"has_line_numbers", "memory_limit", b"memory_limit", "memory_start", b"memory_start"]) -> None: ... global___Mapping = Mapping @typing_extensions.final class Location(google.protobuf.message.Message): """Describes function and line table debug information.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor MAPPING_INDEX_FIELD_NUMBER: builtins.int ADDRESS_FIELD_NUMBER: builtins.int LINE_FIELD_NUMBER: builtins.int IS_FOLDED_FIELD_NUMBER: builtins.int ATTRIBUTE_INDICES_FIELD_NUMBER: builtins.int mapping_index: builtins.int """Reference to mapping in ProfilesDictionary.mapping_table. It can be unset if the mapping is unknown or not applicable for this profile type. """ address: builtins.int """The instruction address for this location, if available. 
It should be within [Mapping.memory_start...Mapping.memory_limit] for the corresponding mapping. A non-leaf address may be in the middle of a call instruction. It is up to display tools to find the beginning of the instruction if necessary. """ @property def line(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Line]: """Multiple line indicates this location has inlined functions, where the last entry represents the caller into which the preceding entries were inlined. E.g., if memcpy() is inlined into printf: line[0].function_name == "memcpy" line[1].function_name == "printf" """ is_folded: builtins.bool """Provides an indication that multiple symbols map to this location's address, for example due to identical code folding by the linker. In that case the line information above represents one of the multiple symbols. This field must be recomputed when the symbolization state of the profile changes. """ @property def attribute_indices(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """References to attributes in ProfilesDictionary.attribute_table. [optional]""" def __init__( self, *, mapping_index: builtins.int | None = ..., address: builtins.int = ..., line: collections.abc.Iterable[global___Line] | None = ..., is_folded: builtins.bool = ..., attribute_indices: collections.abc.Iterable[builtins.int] | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["_mapping_index", b"_mapping_index", "mapping_index", b"mapping_index"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["_mapping_index", b"_mapping_index", "address", b"address", "attribute_indices", b"attribute_indices", "is_folded", b"is_folded", "line", b"line", "mapping_index", b"mapping_index"]) -> None: ... def WhichOneof(self, oneof_group: typing_extensions.Literal["_mapping_index", b"_mapping_index"]) -> typing_extensions.Literal["mapping_index"] | None: ... global___Location = Location @typing_extensions.final class Line(google.protobuf.message.Message): """Details a specific line in a source code, linked to a function.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor FUNCTION_INDEX_FIELD_NUMBER: builtins.int LINE_FIELD_NUMBER: builtins.int COLUMN_FIELD_NUMBER: builtins.int function_index: builtins.int """Reference to function in ProfilesDictionary.function_table.""" line: builtins.int """Line number in source code. 0 means unset.""" column: builtins.int """Column number in source code. 0 means unset.""" def __init__( self, *, function_index: builtins.int = ..., line: builtins.int = ..., column: builtins.int = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["column", b"column", "function_index", b"function_index", "line", b"line"]) -> None: ... global___Line = Line @typing_extensions.final class Function(google.protobuf.message.Message): """Describes a function, including its human-readable name, system name, source file, and starting line number in the source. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor NAME_STRINDEX_FIELD_NUMBER: builtins.int SYSTEM_NAME_STRINDEX_FIELD_NUMBER: builtins.int FILENAME_STRINDEX_FIELD_NUMBER: builtins.int START_LINE_FIELD_NUMBER: builtins.int name_strindex: builtins.int """Function name. Empty string if not available.""" system_name_strindex: builtins.int """Function name, as identified by the system. For instance, it can be a C++ mangled name. Empty string if not available. 
""" filename_strindex: builtins.int """Source file containing the function. Empty string if not available.""" start_line: builtins.int """Line number in source file. 0 means unset.""" def __init__( self, *, name_strindex: builtins.int = ..., system_name_strindex: builtins.int = ..., filename_strindex: builtins.int = ..., start_line: builtins.int = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["filename_strindex", b"filename_strindex", "name_strindex", b"name_strindex", "start_line", b"start_line", "system_name_strindex", b"system_name_strindex"]) -> None: ... global___Function = Function python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/py.typed000066400000000000000000000000001511654350100303710ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/resource/000077500000000000000000000000001511654350100305335ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/resource/__init__.py000066400000000000000000000000001511654350100326320ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/resource/v1/000077500000000000000000000000001511654350100310615ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/resource/v1/__init__.py000066400000000000000000000000001511654350100331600ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/resource/v1/resource_pb2.py000066400000000000000000000035101511654350100340240ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: opentelemetry/proto/resource/v1/resource.proto # Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from opentelemetry.proto.common.v1 import common_pb2 as opentelemetry_dot_proto_dot_common_dot_v1_dot_common__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.opentelemetry/proto/resource/v1/resource.proto\x12\x1fopentelemetry.proto.resource.v1\x1a*opentelemetry/proto/common/v1/common.proto\"\xa8\x01\n\x08Resource\x12;\n\nattributes\x18\x01 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12 \n\x18\x64ropped_attributes_count\x18\x02 \x01(\r\x12=\n\x0b\x65ntity_refs\x18\x03 \x03(\x0b\x32(.opentelemetry.proto.common.v1.EntityRefB\x83\x01\n\"io.opentelemetry.proto.resource.v1B\rResourceProtoP\x01Z*go.opentelemetry.io/proto/otlp/resource/v1\xaa\x02\x1fOpenTelemetry.Proto.Resource.V1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.proto.resource.v1.resource_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n\"io.opentelemetry.proto.resource.v1B\rResourceProtoP\001Z*go.opentelemetry.io/proto/otlp/resource/v1\252\002\037OpenTelemetry.Proto.Resource.V1' _globals['_RESOURCE']._serialized_start=128 _globals['_RESOURCE']._serialized_end=296 # @@protoc_insertion_point(module_scope) 
python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/resource/v1/resource_pb2.pyi000066400000000000000000000052711511654350100342030ustar00rootroot00000000000000""" @generated by mypy-protobuf. Do not edit manually! isort:skip_file Copyright 2019, OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import builtins import collections.abc import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.message import opentelemetry.proto.common.v1.common_pb2 import sys if sys.version_info >= (3, 8): import typing as typing_extensions else: import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor @typing_extensions.final class Resource(google.protobuf.message.Message): """Resource information.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor ATTRIBUTES_FIELD_NUMBER: builtins.int DROPPED_ATTRIBUTES_COUNT_FIELD_NUMBER: builtins.int ENTITY_REFS_FIELD_NUMBER: builtins.int @property def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """Set of attributes that describe the resource. Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ dropped_attributes_count: builtins.int """dropped_attributes_count is the number of dropped attributes. If the value is 0, then no attributes were dropped. """ @property def entity_refs(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.EntityRef]: """Set of entities that participate in this Resource. Note: keys in the references MUST exist in attributes of this message. Status: [Development] """ def __init__( self, *, attributes: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., dropped_attributes_count: builtins.int = ..., entity_refs: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.EntityRef] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["attributes", b"attributes", "dropped_attributes_count", b"dropped_attributes_count", "entity_refs", b"entity_refs"]) -> None: ... 
global___Resource = Resource python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/trace/000077500000000000000000000000001511654350100300025ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/trace/__init__.py000066400000000000000000000000001511654350100321010ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/trace/v1/000077500000000000000000000000001511654350100303305ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/trace/v1/__init__.py000066400000000000000000000000001511654350100324270ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/trace/v1/trace_pb2.py000066400000000000000000000125651511654350100325540ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: opentelemetry/proto/trace/v1/trace.proto # Protobuf Python Version: 5.26.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from opentelemetry.proto.common.v1 import common_pb2 as opentelemetry_dot_proto_dot_common_dot_v1_dot_common__pb2 from opentelemetry.proto.resource.v1 import resource_pb2 as opentelemetry_dot_proto_dot_resource_dot_v1_dot_resource__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n(opentelemetry/proto/trace/v1/trace.proto\x12\x1copentelemetry.proto.trace.v1\x1a*opentelemetry/proto/common/v1/common.proto\x1a.opentelemetry/proto/resource/v1/resource.proto\"Q\n\nTracesData\x12\x43\n\x0eresource_spans\x18\x01 \x03(\x0b\x32+.opentelemetry.proto.trace.v1.ResourceSpans\"\xa7\x01\n\rResourceSpans\x12;\n\x08resource\x18\x01 \x01(\x0b\x32).opentelemetry.proto.resource.v1.Resource\x12=\n\x0bscope_spans\x18\x02 \x03(\x0b\x32(.opentelemetry.proto.trace.v1.ScopeSpans\x12\x12\n\nschema_url\x18\x03 \x01(\tJ\x06\x08\xe8\x07\x10\xe9\x07\"\x97\x01\n\nScopeSpans\x12\x42\n\x05scope\x18\x01 \x01(\x0b\x32\x33.opentelemetry.proto.common.v1.InstrumentationScope\x12\x31\n\x05spans\x18\x02 \x03(\x0b\x32\".opentelemetry.proto.trace.v1.Span\x12\x12\n\nschema_url\x18\x03 \x01(\t\"\x84\x08\n\x04Span\x12\x10\n\x08trace_id\x18\x01 \x01(\x0c\x12\x0f\n\x07span_id\x18\x02 \x01(\x0c\x12\x13\n\x0btrace_state\x18\x03 \x01(\t\x12\x16\n\x0eparent_span_id\x18\x04 \x01(\x0c\x12\r\n\x05\x66lags\x18\x10 \x01(\x07\x12\x0c\n\x04name\x18\x05 \x01(\t\x12\x39\n\x04kind\x18\x06 \x01(\x0e\x32+.opentelemetry.proto.trace.v1.Span.SpanKind\x12\x1c\n\x14start_time_unix_nano\x18\x07 \x01(\x06\x12\x1a\n\x12\x65nd_time_unix_nano\x18\x08 \x01(\x06\x12;\n\nattributes\x18\t \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12 \n\x18\x64ropped_attributes_count\x18\n \x01(\r\x12\x38\n\x06\x65vents\x18\x0b \x03(\x0b\x32(.opentelemetry.proto.trace.v1.Span.Event\x12\x1c\n\x14\x64ropped_events_count\x18\x0c \x01(\r\x12\x36\n\x05links\x18\r \x03(\x0b\x32\'.opentelemetry.proto.trace.v1.Span.Link\x12\x1b\n\x13\x64ropped_links_count\x18\x0e \x01(\r\x12\x34\n\x06status\x18\x0f \x01(\x0b\x32$.opentelemetry.proto.trace.v1.Status\x1a\x8c\x01\n\x05\x45vent\x12\x16\n\x0etime_unix_nano\x18\x01 \x01(\x06\x12\x0c\n\x04name\x18\x02 \x01(\t\x12;\n\nattributes\x18\x03 
\x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12 \n\x18\x64ropped_attributes_count\x18\x04 \x01(\r\x1a\xac\x01\n\x04Link\x12\x10\n\x08trace_id\x18\x01 \x01(\x0c\x12\x0f\n\x07span_id\x18\x02 \x01(\x0c\x12\x13\n\x0btrace_state\x18\x03 \x01(\t\x12;\n\nattributes\x18\x04 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12 \n\x18\x64ropped_attributes_count\x18\x05 \x01(\r\x12\r\n\x05\x66lags\x18\x06 \x01(\x07\"\x99\x01\n\x08SpanKind\x12\x19\n\x15SPAN_KIND_UNSPECIFIED\x10\x00\x12\x16\n\x12SPAN_KIND_INTERNAL\x10\x01\x12\x14\n\x10SPAN_KIND_SERVER\x10\x02\x12\x14\n\x10SPAN_KIND_CLIENT\x10\x03\x12\x16\n\x12SPAN_KIND_PRODUCER\x10\x04\x12\x16\n\x12SPAN_KIND_CONSUMER\x10\x05\"\xae\x01\n\x06Status\x12\x0f\n\x07message\x18\x02 \x01(\t\x12=\n\x04\x63ode\x18\x03 \x01(\x0e\x32/.opentelemetry.proto.trace.v1.Status.StatusCode\"N\n\nStatusCode\x12\x15\n\x11STATUS_CODE_UNSET\x10\x00\x12\x12\n\x0eSTATUS_CODE_OK\x10\x01\x12\x15\n\x11STATUS_CODE_ERROR\x10\x02J\x04\x08\x01\x10\x02*\x9c\x01\n\tSpanFlags\x12\x19\n\x15SPAN_FLAGS_DO_NOT_USE\x10\x00\x12 \n\x1bSPAN_FLAGS_TRACE_FLAGS_MASK\x10\xff\x01\x12*\n%SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK\x10\x80\x02\x12&\n!SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK\x10\x80\x04\x42w\n\x1fio.opentelemetry.proto.trace.v1B\nTraceProtoP\x01Z\'go.opentelemetry.io/proto/otlp/trace/v1\xaa\x02\x1cOpenTelemetry.Proto.Trace.V1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'opentelemetry.proto.trace.v1.trace_pb2', _globals) if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n\037io.opentelemetry.proto.trace.v1B\nTraceProtoP\001Z\'go.opentelemetry.io/proto/otlp/trace/v1\252\002\034OpenTelemetry.Proto.Trace.V1' _globals['_SPANFLAGS']._serialized_start=1782 _globals['_SPANFLAGS']._serialized_end=1938 _globals['_TRACESDATA']._serialized_start=166 _globals['_TRACESDATA']._serialized_end=247 _globals['_RESOURCESPANS']._serialized_start=250 _globals['_RESOURCESPANS']._serialized_end=417 _globals['_SCOPESPANS']._serialized_start=420 _globals['_SCOPESPANS']._serialized_end=571 _globals['_SPAN']._serialized_start=574 _globals['_SPAN']._serialized_end=1602 _globals['_SPAN_EVENT']._serialized_start=1131 _globals['_SPAN_EVENT']._serialized_end=1271 _globals['_SPAN_LINK']._serialized_start=1274 _globals['_SPAN_LINK']._serialized_end=1446 _globals['_SPAN_SPANKIND']._serialized_start=1449 _globals['_SPAN_SPANKIND']._serialized_end=1602 _globals['_STATUS']._serialized_start=1605 _globals['_STATUS']._serialized_end=1779 _globals['_STATUS_STATUSCODE']._serialized_start=1695 _globals['_STATUS_STATUSCODE']._serialized_end=1773 # @@protoc_insertion_point(module_scope) python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/trace/v1/trace_pb2.pyi000066400000000000000000000674131511654350100327270ustar00rootroot00000000000000""" @generated by mypy-protobuf. Do not edit manually! isort:skip_file Copyright 2019, OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. """ import builtins import collections.abc import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.internal.enum_type_wrapper import google.protobuf.message import opentelemetry.proto.common.v1.common_pb2 import opentelemetry.proto.resource.v1.resource_pb2 import sys import typing if sys.version_info >= (3, 10): import typing as typing_extensions else: import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor class _SpanFlags: ValueType = typing.NewType("ValueType", builtins.int) V: typing_extensions.TypeAlias = ValueType class _SpanFlagsEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_SpanFlags.ValueType], builtins.type): DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor SPAN_FLAGS_DO_NOT_USE: _SpanFlags.ValueType # 0 """The zero value for the enum. Should not be used for comparisons. Instead use bitwise "and" with the appropriate mask as shown above. """ SPAN_FLAGS_TRACE_FLAGS_MASK: _SpanFlags.ValueType # 255 """Bits 0-7 are used for trace flags.""" SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK: _SpanFlags.ValueType # 256 """Bits 8 and 9 are used to indicate that the parent span or link span is remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. """ SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK: _SpanFlags.ValueType # 512 class SpanFlags(_SpanFlags, metaclass=_SpanFlagsEnumTypeWrapper): """SpanFlags represents constants used to interpret the Span.flags field, which is protobuf 'fixed32' type and is to be used as bit-fields. Each non-zero value defined in this enum is a bit-mask. To extract the bit-field, for example, use an expression like: (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. Note that Span flags were introduced in version 1.1 of the OpenTelemetry protocol. Older Span producers do not set this field, consequently consumers should not rely on the absence of a particular flag bit to indicate the presence of a particular feature. """ SPAN_FLAGS_DO_NOT_USE: SpanFlags.ValueType # 0 """The zero value for the enum. Should not be used for comparisons. Instead use bitwise "and" with the appropriate mask as shown above. """ SPAN_FLAGS_TRACE_FLAGS_MASK: SpanFlags.ValueType # 255 """Bits 0-7 are used for trace flags.""" SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK: SpanFlags.ValueType # 256 """Bits 8 and 9 are used to indicate that the parent span or link span is remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. """ SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK: SpanFlags.ValueType # 512 global___SpanFlags = SpanFlags @typing_extensions.final class TracesData(google.protobuf.message.Message): """TracesData represents the traces data that can be stored in a persistent storage, OR can be embedded by other protocols that transfer OTLP traces data but do not implement the OTLP protocol. The main difference between this message and collector protocol is that in this message there will not be any "control" or "metadata" specific to OTLP protocol. When new fields are added into this message, the OTLP request MUST be updated as well. 
""" DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_SPANS_FIELD_NUMBER: builtins.int @property def resource_spans(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ResourceSpans]: """An array of ResourceSpans. For data coming from a single resource this array will typically contain one element. Intermediary nodes that receive data from multiple origins typically batch the data before forwarding further and in that case this array will contain multiple elements. """ def __init__( self, *, resource_spans: collections.abc.Iterable[global___ResourceSpans] | None = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["resource_spans", b"resource_spans"]) -> None: ... global___TracesData = TracesData @typing_extensions.final class ResourceSpans(google.protobuf.message.Message): """A collection of ScopeSpans from a Resource.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor RESOURCE_FIELD_NUMBER: builtins.int SCOPE_SPANS_FIELD_NUMBER: builtins.int SCHEMA_URL_FIELD_NUMBER: builtins.int @property def resource(self) -> opentelemetry.proto.resource.v1.resource_pb2.Resource: """The resource for the spans in this message. If this field is not set then no resource info is known. """ @property def scope_spans(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ScopeSpans]: """A list of ScopeSpans that originate from a resource.""" schema_url: builtins.str """The Schema URL, if known. This is the identifier of the Schema that the resource data is recorded in. Notably, the last part of the URL path is the version number of the schema: http[s]://server[:port]/path/. To learn more about Schema URL see https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This schema_url applies to the data in the "resource" field. It does not apply to the data in the "scope_spans" field which have their own schema_url field. """ def __init__( self, *, resource: opentelemetry.proto.resource.v1.resource_pb2.Resource | None = ..., scope_spans: collections.abc.Iterable[global___ScopeSpans] | None = ..., schema_url: builtins.str = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["resource", b"resource"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["resource", b"resource", "schema_url", b"schema_url", "scope_spans", b"scope_spans"]) -> None: ... global___ResourceSpans = ResourceSpans @typing_extensions.final class ScopeSpans(google.protobuf.message.Message): """A collection of Spans produced by an InstrumentationScope.""" DESCRIPTOR: google.protobuf.descriptor.Descriptor SCOPE_FIELD_NUMBER: builtins.int SPANS_FIELD_NUMBER: builtins.int SCHEMA_URL_FIELD_NUMBER: builtins.int @property def scope(self) -> opentelemetry.proto.common.v1.common_pb2.InstrumentationScope: """The instrumentation scope information for the spans in this message. Semantically when InstrumentationScope isn't set, it is equivalent with an empty instrumentation scope name (unknown). """ @property def spans(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Span]: """A list of Spans that originate from an instrumentation scope.""" schema_url: builtins.str """The Schema URL, if known. This is the identifier of the Schema that the span data is recorded in. Notably, the last part of the URL path is the version number of the schema: http[s]://server[:port]/path/. 
To learn more about Schema URL see https://opentelemetry.io/docs/specs/otel/schemas/#schema-url This schema_url applies to all spans and span events in the "spans" field. """ def __init__( self, *, scope: opentelemetry.proto.common.v1.common_pb2.InstrumentationScope | None = ..., spans: collections.abc.Iterable[global___Span] | None = ..., schema_url: builtins.str = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["scope", b"scope"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["schema_url", b"schema_url", "scope", b"scope", "spans", b"spans"]) -> None: ... global___ScopeSpans = ScopeSpans @typing_extensions.final class Span(google.protobuf.message.Message): """A Span represents a single operation performed by a single component of the system. The next available field id is 17. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor class _SpanKind: ValueType = typing.NewType("ValueType", builtins.int) V: typing_extensions.TypeAlias = ValueType class _SpanKindEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[Span._SpanKind.ValueType], builtins.type): DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor SPAN_KIND_UNSPECIFIED: Span._SpanKind.ValueType # 0 """Unspecified. Do NOT use as default. Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. """ SPAN_KIND_INTERNAL: Span._SpanKind.ValueType # 1 """Indicates that the span represents an internal operation within an application, as opposed to an operation happening at the boundaries. Default value. """ SPAN_KIND_SERVER: Span._SpanKind.ValueType # 2 """Indicates that the span covers server-side handling of an RPC or other remote network request. """ SPAN_KIND_CLIENT: Span._SpanKind.ValueType # 3 """Indicates that the span describes a request to some remote service.""" SPAN_KIND_PRODUCER: Span._SpanKind.ValueType # 4 """Indicates that the span describes a producer sending a message to a broker. Unlike CLIENT and SERVER, there is often no direct critical path latency relationship between producer and consumer spans. A PRODUCER span ends when the message was accepted by the broker while the logical processing of the message might span a much longer time. """ SPAN_KIND_CONSUMER: Span._SpanKind.ValueType # 5 """Indicates that the span describes consumer receiving a message from a broker. Like the PRODUCER kind, there is often no direct critical path latency relationship between producer and consumer spans. """ class SpanKind(_SpanKind, metaclass=_SpanKindEnumTypeWrapper): """SpanKind is the type of span. Can be used to specify additional relationships between spans in addition to a parent/child relationship. """ SPAN_KIND_UNSPECIFIED: Span.SpanKind.ValueType # 0 """Unspecified. Do NOT use as default. Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. """ SPAN_KIND_INTERNAL: Span.SpanKind.ValueType # 1 """Indicates that the span represents an internal operation within an application, as opposed to an operation happening at the boundaries. Default value. """ SPAN_KIND_SERVER: Span.SpanKind.ValueType # 2 """Indicates that the span covers server-side handling of an RPC or other remote network request. """ SPAN_KIND_CLIENT: Span.SpanKind.ValueType # 3 """Indicates that the span describes a request to some remote service.""" SPAN_KIND_PRODUCER: Span.SpanKind.ValueType # 4 """Indicates that the span describes a producer sending a message to a broker. 
Unlike CLIENT and SERVER, there is often no direct critical path latency relationship between producer and consumer spans. A PRODUCER span ends when the message was accepted by the broker while the logical processing of the message might span a much longer time. """ SPAN_KIND_CONSUMER: Span.SpanKind.ValueType # 5 """Indicates that the span describes consumer receiving a message from a broker. Like the PRODUCER kind, there is often no direct critical path latency relationship between producer and consumer spans. """ @typing_extensions.final class Event(google.protobuf.message.Message): """Event is a time-stamped annotation of the span, consisting of user-supplied text description and key-value pairs. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor TIME_UNIX_NANO_FIELD_NUMBER: builtins.int NAME_FIELD_NUMBER: builtins.int ATTRIBUTES_FIELD_NUMBER: builtins.int DROPPED_ATTRIBUTES_COUNT_FIELD_NUMBER: builtins.int time_unix_nano: builtins.int """time_unix_nano is the time the event occurred.""" name: builtins.str """name of the event. This field is semantically required to be set to non-empty string. """ @property def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """attributes is a collection of attribute key/value pairs on the event. Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ dropped_attributes_count: builtins.int """dropped_attributes_count is the number of dropped attributes. If the value is 0, then no attributes were dropped. """ def __init__( self, *, time_unix_nano: builtins.int = ..., name: builtins.str = ..., attributes: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., dropped_attributes_count: builtins.int = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["attributes", b"attributes", "dropped_attributes_count", b"dropped_attributes_count", "name", b"name", "time_unix_nano", b"time_unix_nano"]) -> None: ... @typing_extensions.final class Link(google.protobuf.message.Message): """A pointer from the current span to another span in the same trace or in a different trace. For example, this can be used in batching operations, where a single batch handler processes multiple requests from different traces or when the handler receives a request from a different project. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor TRACE_ID_FIELD_NUMBER: builtins.int SPAN_ID_FIELD_NUMBER: builtins.int TRACE_STATE_FIELD_NUMBER: builtins.int ATTRIBUTES_FIELD_NUMBER: builtins.int DROPPED_ATTRIBUTES_COUNT_FIELD_NUMBER: builtins.int FLAGS_FIELD_NUMBER: builtins.int trace_id: builtins.bytes """A unique identifier of a trace that this linked span is part of. The ID is a 16-byte array. """ span_id: builtins.bytes """A unique identifier for the linked span. The ID is an 8-byte array.""" trace_state: builtins.str """The trace_state associated with the link.""" @property def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """attributes is a collection of attribute key/value pairs on the link. Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ dropped_attributes_count: builtins.int """dropped_attributes_count is the number of dropped attributes. If the value is 0, then no attributes were dropped. """ flags: builtins.int """Flags, a bit field. 
Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace Context specification. To read the 8-bit W3C trace flag, use `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. Bits 8 and 9 represent the 3 states of whether the link is remote. The states are (unknown, is not remote, is remote). To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. [Optional]. """ def __init__( self, *, trace_id: builtins.bytes = ..., span_id: builtins.bytes = ..., trace_state: builtins.str = ..., attributes: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., dropped_attributes_count: builtins.int = ..., flags: builtins.int = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["attributes", b"attributes", "dropped_attributes_count", b"dropped_attributes_count", "flags", b"flags", "span_id", b"span_id", "trace_id", b"trace_id", "trace_state", b"trace_state"]) -> None: ... TRACE_ID_FIELD_NUMBER: builtins.int SPAN_ID_FIELD_NUMBER: builtins.int TRACE_STATE_FIELD_NUMBER: builtins.int PARENT_SPAN_ID_FIELD_NUMBER: builtins.int FLAGS_FIELD_NUMBER: builtins.int NAME_FIELD_NUMBER: builtins.int KIND_FIELD_NUMBER: builtins.int START_TIME_UNIX_NANO_FIELD_NUMBER: builtins.int END_TIME_UNIX_NANO_FIELD_NUMBER: builtins.int ATTRIBUTES_FIELD_NUMBER: builtins.int DROPPED_ATTRIBUTES_COUNT_FIELD_NUMBER: builtins.int EVENTS_FIELD_NUMBER: builtins.int DROPPED_EVENTS_COUNT_FIELD_NUMBER: builtins.int LINKS_FIELD_NUMBER: builtins.int DROPPED_LINKS_COUNT_FIELD_NUMBER: builtins.int STATUS_FIELD_NUMBER: builtins.int trace_id: builtins.bytes """A unique identifier for a trace. All spans from the same trace share the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR of length other than 16 bytes is considered invalid (empty string in OTLP/JSON is zero-length and thus is also invalid). This field is required. """ span_id: builtins.bytes """A unique identifier for a span within a trace, assigned when the span is created. The ID is an 8-byte array. An ID with all zeroes OR of length other than 8 bytes is considered invalid (empty string in OTLP/JSON is zero-length and thus is also invalid). This field is required. """ trace_state: builtins.str """trace_state conveys information about request position in multiple distributed tracing graphs. It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header See also https://github.com/w3c/distributed-tracing for more details about this field. """ parent_span_id: builtins.bytes """The `span_id` of this span's parent span. If this is a root span, then this field must be empty. The ID is an 8-byte array. """ flags: builtins.int """Flags, a bit field. Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace Context specification. To read the 8-bit W3C trace flag, use `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. Bits 8 and 9 represent the 3 states of whether a span's parent is remote. The states are (unknown, is not remote, is remote). 
To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. When creating span messages, if the message is logically forwarded from another source with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD be copied as-is. If creating from a source that does not have an equivalent flags field (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST be set to zero. Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. [Optional]. """ name: builtins.str """A description of the span's operation. For example, the name can be a qualified method name or a file name and a line number where the operation is called. A best practice is to use the same display name at the same call point in an application. This makes it easier to correlate spans in different traces. This field is semantically required to be set to non-empty string. Empty value is equivalent to an unknown span name. This field is required. """ kind: global___Span.SpanKind.ValueType """Distinguishes between spans generated in a particular context. For example, two spans with the same name may be distinguished using `CLIENT` (caller) and `SERVER` (callee) to identify queueing latency associated with the span. """ start_time_unix_nano: builtins.int """start_time_unix_nano is the start time of the span. On the client side, this is the time kept by the local machine where the span execution starts. On the server side, this is the time when the server's application handler starts running. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. This field is semantically required and it is expected that end_time >= start_time. """ end_time_unix_nano: builtins.int """end_time_unix_nano is the end time of the span. On the client side, this is the time kept by the local machine where the span execution ends. On the server side, this is the time when the server application handler stops running. Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. This field is semantically required and it is expected that end_time >= start_time. """ @property def attributes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[opentelemetry.proto.common.v1.common_pb2.KeyValue]: """attributes is a collection of key/value pairs. Note, global attributes like server name can be set using the resource API. Examples of attributes: "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" "/http/server_latency": 300 "example.com/myattribute": true "example.com/score": 10.239 The OpenTelemetry API specification further restricts the allowed value types: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute Attribute keys MUST be unique (it is not allowed to have more than one attribute with the same key). """ dropped_attributes_count: builtins.int """dropped_attributes_count is the number of attributes that were discarded. Attributes can be discarded because their keys are too long or because there are too many attributes. If this value is 0, then no attributes were dropped. 
""" @property def events(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Span.Event]: """events is a collection of Event items.""" dropped_events_count: builtins.int """dropped_events_count is the number of dropped events. If the value is 0, then no events were dropped. """ @property def links(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Span.Link]: """links is a collection of Links, which are references from this span to a span in the same or different trace. """ dropped_links_count: builtins.int """dropped_links_count is the number of dropped links after the maximum size was enforced. If this value is 0, then no links were dropped. """ @property def status(self) -> global___Status: """An optional final status for this span. Semantically when Status isn't set, it means span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). """ def __init__( self, *, trace_id: builtins.bytes = ..., span_id: builtins.bytes = ..., trace_state: builtins.str = ..., parent_span_id: builtins.bytes = ..., flags: builtins.int = ..., name: builtins.str = ..., kind: global___Span.SpanKind.ValueType = ..., start_time_unix_nano: builtins.int = ..., end_time_unix_nano: builtins.int = ..., attributes: collections.abc.Iterable[opentelemetry.proto.common.v1.common_pb2.KeyValue] | None = ..., dropped_attributes_count: builtins.int = ..., events: collections.abc.Iterable[global___Span.Event] | None = ..., dropped_events_count: builtins.int = ..., links: collections.abc.Iterable[global___Span.Link] | None = ..., dropped_links_count: builtins.int = ..., status: global___Status | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["status", b"status"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["attributes", b"attributes", "dropped_attributes_count", b"dropped_attributes_count", "dropped_events_count", b"dropped_events_count", "dropped_links_count", b"dropped_links_count", "end_time_unix_nano", b"end_time_unix_nano", "events", b"events", "flags", b"flags", "kind", b"kind", "links", b"links", "name", b"name", "parent_span_id", b"parent_span_id", "span_id", b"span_id", "start_time_unix_nano", b"start_time_unix_nano", "status", b"status", "trace_id", b"trace_id", "trace_state", b"trace_state"]) -> None: ... global___Span = Span @typing_extensions.final class Status(google.protobuf.message.Message): """The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. """ DESCRIPTOR: google.protobuf.descriptor.Descriptor class _StatusCode: ValueType = typing.NewType("ValueType", builtins.int) V: typing_extensions.TypeAlias = ValueType class _StatusCodeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[Status._StatusCode.ValueType], builtins.type): DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor STATUS_CODE_UNSET: Status._StatusCode.ValueType # 0 """The default status.""" STATUS_CODE_OK: Status._StatusCode.ValueType # 1 """The Span has been validated by an Application developer or Operator to have completed successfully. 
""" STATUS_CODE_ERROR: Status._StatusCode.ValueType # 2 """The Span contains an error.""" class StatusCode(_StatusCode, metaclass=_StatusCodeEnumTypeWrapper): """For the semantics of status codes see https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status """ STATUS_CODE_UNSET: Status.StatusCode.ValueType # 0 """The default status.""" STATUS_CODE_OK: Status.StatusCode.ValueType # 1 """The Span has been validated by an Application developer or Operator to have completed successfully. """ STATUS_CODE_ERROR: Status.StatusCode.ValueType # 2 """The Span contains an error.""" MESSAGE_FIELD_NUMBER: builtins.int CODE_FIELD_NUMBER: builtins.int message: builtins.str """A developer-facing human readable error message.""" code: global___Status.StatusCode.ValueType """The status code.""" def __init__( self, *, message: builtins.str = ..., code: global___Status.StatusCode.ValueType = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["code", b"code", "message", b"message"]) -> None: ... global___Status = Status python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/version/000077500000000000000000000000001511654350100303715ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/src/opentelemetry/proto/version/__init__.py000066400000000000000000000011401511654350100324760ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
__version__ = "1.39.1" python-opentelemetry-1.39.1/opentelemetry-proto/test-requirements.in000066400000000000000000000001471511654350100261100ustar00rootroot00000000000000colorama>=0.4.6 iniconfig>=2.0.0 packaging>=24.0 protobuf>=5.29.5 pytest>=7.4.4 -e opentelemetry-proto python-opentelemetry-1.39.1/opentelemetry-proto/test-requirements.latest.txt000066400000000000000000000020211511654350100276050ustar00rootroot00000000000000# This file was autogenerated by uv via the following command: # uv pip compile --python 3.9 --universal -c dev-requirements.txt opentelemetry-proto/test-requirements.in -o opentelemetry-proto/test-requirements.latest.txt -e opentelemetry-proto # via -r opentelemetry-proto/test-requirements.in colorama==0.4.6 # via # -r opentelemetry-proto/test-requirements.in # pytest exceptiongroup==1.3.0 ; python_full_version < '3.11' # via pytest iniconfig==2.1.0 # via # -r opentelemetry-proto/test-requirements.in # pytest packaging==25.0 # via # -r opentelemetry-proto/test-requirements.in # pytest pluggy==1.6.0 # via pytest protobuf==6.31.1 # via # -r opentelemetry-proto/test-requirements.in # opentelemetry-proto pytest==7.4.4 # via # -c dev-requirements.txt # -r opentelemetry-proto/test-requirements.in tomli==2.2.1 ; python_full_version < '3.11' # via pytest typing-extensions==4.14.0 ; python_full_version < '3.11' # via exceptiongroup python-opentelemetry-1.39.1/opentelemetry-proto/test-requirements.oldest.txt000066400000000000000000000020451511654350100276110ustar00rootroot00000000000000# This file was autogenerated by uv via the following command: # uv pip compile --python 3.9 --universal --resolution lowest -c dev-requirements.txt opentelemetry-proto/test-requirements.in -o opentelemetry-proto/test-requirements.oldest.txt -e opentelemetry-proto # via -r opentelemetry-proto/test-requirements.in colorama==0.4.6 # via # -r opentelemetry-proto/test-requirements.in # pytest exceptiongroup==1.3.0 ; python_full_version < '3.11' # via pytest iniconfig==2.1.0 # via # -r opentelemetry-proto/test-requirements.in # pytest packaging==25.0 # via # -r opentelemetry-proto/test-requirements.in # pytest pluggy==1.6.0 # via pytest protobuf==5.29.5 # via # -r opentelemetry-proto/test-requirements.in # opentelemetry-proto pytest==7.4.4 # via # -c dev-requirements.txt # -r opentelemetry-proto/test-requirements.in tomli==2.2.1 ; python_full_version < '3.11' # via pytest typing-extensions==4.14.0 ; python_full_version < '3.11' # via exceptiongroup python-opentelemetry-1.39.1/opentelemetry-proto/tests/000077500000000000000000000000001511654350100232205ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/tests/__init__.py000066400000000000000000000000001511654350100253170ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-proto/tests/test_proto.py000066400000000000000000000015111511654350100257720ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# type: ignore from importlib.util import find_spec from unittest import TestCase class TestInstrumentor(TestCase): def test_proto(self): if find_spec("opentelemetry.proto") is None: self.fail("opentelemetry-proto not installed") python-opentelemetry-1.39.1/opentelemetry-sdk/000077500000000000000000000000001511654350100214745ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/LICENSE000066400000000000000000000261351511654350100225100ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/opentelemetry-sdk/README.rst000066400000000000000000000005711511654350100231660ustar00rootroot00000000000000OpenTelemetry Python SDK ============================================================================ |pypi| .. |pypi| image:: https://badge.fury.io/py/opentelemetry-sdk.svg :target: https://pypi.org/project/opentelemetry-sdk/ Installation ------------ :: pip install opentelemetry-sdk References ---------- * `OpenTelemetry Project <https://opentelemetry.io/>`_ python-opentelemetry-1.39.1/opentelemetry-sdk/benchmark-requirements.txt000066400000000000000000000000301511654350100267010ustar00rootroot00000000000000pytest-benchmark==4.0.0 python-opentelemetry-1.39.1/opentelemetry-sdk/benchmarks/000077500000000000000000000000001511654350100236115ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/benchmarks/logs/000077500000000000000000000000001511654350100245555ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/benchmarks/logs/test_benchmark_logging_handler.py000066400000000000000000000021371511654350100333260ustar00rootroot00000000000000import logging import pytest from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler from opentelemetry.sdk._logs.export import ( InMemoryLogRecordExporter, SimpleLogRecordProcessor, ) def _set_up_logging_handler(level): logger_provider = LoggerProvider() exporter = InMemoryLogRecordExporter() processor = SimpleLogRecordProcessor(exporter=exporter) logger_provider.add_log_record_processor(processor) handler = LoggingHandler(level=level, logger_provider=logger_provider) return handler def _create_logger(handler, name): logger = logging.getLogger(name) logger.addHandler(handler) return logger @pytest.mark.parametrize("num_loggers", [1, 10, 100, 1000]) def test_simple_get_logger_different_names(benchmark, num_loggers): handler = _set_up_logging_handler(level=logging.DEBUG) loggers = [ _create_logger(handler, f"logger_{i}") for i in range(num_loggers) ] def benchmark_get_logger(): for index in range(1000): loggers[index % num_loggers].warning("test message") benchmark(benchmark_get_logger) python-opentelemetry-1.39.1/opentelemetry-sdk/benchmarks/metrics/000077500000000000000000000000001511654350100252575ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics.py000066400000000000000000000050071511654350100323520ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
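# --- illustrative sketch (not part of the repository sources) ----------------
# A minimal, hedged variant of the wiring in the logging-handler benchmark
# above, swapping in ConsoleLogRecordExporter (the "console" logs exporter this
# repository registers in opentelemetry-sdk's entry points) so the emitted
# record is printed instead of held in memory.
import logging

from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import (
    ConsoleLogRecordExporter,
    SimpleLogRecordProcessor,
)

demo_provider = LoggerProvider()
demo_provider.add_log_record_processor(
    SimpleLogRecordProcessor(exporter=ConsoleLogRecordExporter())
)
demo_handler = LoggingHandler(level=logging.INFO, logger_provider=demo_provider)

demo_logger = logging.getLogger("demo")
demo_logger.addHandler(demo_handler)
demo_logger.warning("hello from the OpenTelemetry logging handler")
# ------------------------------------------------------------------------------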
import pytest from opentelemetry.sdk.metrics import Counter, MeterProvider from opentelemetry.sdk.metrics.export import ( AggregationTemporality, InMemoryMetricReader, ) reader_cumulative = InMemoryMetricReader() reader_delta = InMemoryMetricReader( preferred_temporality={ Counter: AggregationTemporality.DELTA, }, ) provider_reader_cumulative = MeterProvider( metric_readers=[reader_cumulative], ) provider_reader_delta = MeterProvider(metric_readers=[reader_delta]) meter_cumulative = provider_reader_cumulative.get_meter("sdk_meter_provider") meter_delta = provider_reader_delta.get_meter("sdk_meter_provider_delta") counter_cumulative = meter_cumulative.create_counter("test_counter") counter_delta = meter_delta.create_counter("test_counter2") udcounter = meter_cumulative.create_up_down_counter("test_udcounter") @pytest.mark.parametrize( ("num_labels", "temporality"), [ (0, "delta"), (1, "delta"), (3, "delta"), (5, "delta"), (10, "delta"), (0, "cumulative"), (1, "cumulative"), (3, "cumulative"), (5, "cumulative"), (10, "cumulative"), ], ) def test_counter_add(benchmark, num_labels, temporality): # pylint: disable=invalid-name labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)} def benchmark_counter_add(): if temporality == "cumulative": counter_cumulative.add(1, labels) else: counter_delta.add(1, labels) benchmark(benchmark_counter_add) @pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 10]) def test_up_down_counter_add(benchmark, num_labels): # pylint: disable=invalid-name labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)} def benchmark_up_down_counter_add(): udcounter.add(1, labels) benchmark(benchmark_up_down_counter_add) python-opentelemetry-1.39.1/opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics_histogram.py000066400000000000000000000074561511654350100344370ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
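# --- illustrative sketch (not part of the repository sources) ----------------
# A small, hedged example of inspecting what counters like the ones in the
# benchmark above actually recorded: the InMemoryMetricReader used by these
# benchmarks exposes get_metrics_data(), which returns the accumulated
# MetricsData (resource_metrics -> scope_metrics -> metrics -> data points).
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader

demo_reader = InMemoryMetricReader()
demo_meter = MeterProvider(metric_readers=[demo_reader]).get_meter("demo")
demo_counter = demo_meter.create_counter("demo_counter")
demo_counter.add(1, {"Key0": "Value0"})

metrics_data = demo_reader.get_metrics_data()
for resource_metrics in metrics_data.resource_metrics:
    for scope_metrics in resource_metrics.scope_metrics:
        for metric in scope_metrics.metrics:
            for point in metric.data.data_points:
                # For a Counter this prints the Sum data point's value.
                print(metric.name, dict(point.attributes), point.value)
# ------------------------------------------------------------------------------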
# pylint: disable=invalid-name import random import pytest from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import InMemoryMetricReader from opentelemetry.sdk.metrics.view import ( ExplicitBucketHistogramAggregation, View, ) MAX_BOUND_VALUE = 10000 def _generate_bounds(bound_count): bounds = [] for i in range(bound_count): bounds.append(i * MAX_BOUND_VALUE / bound_count) return bounds hist_view_10 = View( instrument_name="test_histogram_10_bound", aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(10)), ) hist_view_49 = View( instrument_name="test_histogram_49_bound", aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(49)), ) hist_view_50 = View( instrument_name="test_histogram_50_bound", aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(50)), ) hist_view_1000 = View( instrument_name="test_histogram_1000_bound", aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(1000)), ) reader = InMemoryMetricReader() provider = MeterProvider( metric_readers=[reader], views=[ hist_view_10, hist_view_49, hist_view_50, hist_view_1000, ], ) meter = provider.get_meter("sdk_meter_provider") hist = meter.create_histogram("test_histogram_default") hist10 = meter.create_histogram("test_histogram_10_bound") hist49 = meter.create_histogram("test_histogram_49_bound") hist50 = meter.create_histogram("test_histogram_50_bound") hist1000 = meter.create_histogram("test_histogram_1000_bound") @pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7]) def test_histogram_record(benchmark, num_labels): labels = {} for i in range(num_labels): labels[f"Key{i}"] = f"Value{i}" def benchmark_histogram_record(): hist.record(random.random() * MAX_BOUND_VALUE) benchmark(benchmark_histogram_record) @pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7]) def test_histogram_record_10(benchmark, num_labels): labels = {} for i in range(num_labels): labels[f"Key{i}"] = f"Value{i}" def benchmark_histogram_record_10(): hist10.record(random.random() * MAX_BOUND_VALUE) benchmark(benchmark_histogram_record_10) @pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7]) def test_histogram_record_49(benchmark, num_labels): labels = {} for i in range(num_labels): labels[f"Key{i}"] = f"Value{i}" def benchmark_histogram_record_49(): hist49.record(random.random() * MAX_BOUND_VALUE) benchmark(benchmark_histogram_record_49) @pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7]) def test_histogram_record_50(benchmark, num_labels): labels = {} for i in range(num_labels): labels[f"Key{i}"] = f"Value{i}" def benchmark_histogram_record_50(): hist50.record(random.random() * MAX_BOUND_VALUE) benchmark(benchmark_histogram_record_50) @pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7]) def test_histogram_record_1000(benchmark, num_labels): labels = {} for i in range(num_labels): labels[f"Key{i}"] = f"Value{i}" def benchmark_histogram_record_1000(): hist1000.record(random.random() * MAX_BOUND_VALUE) benchmark(benchmark_histogram_record_1000) test_benchmark_metrics_histogram_steady.py000066400000000000000000000062631511654350100357260ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/benchmarks/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=invalid-name import itertools from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import InMemoryMetricReader from opentelemetry.sdk.metrics.view import ( ExplicitBucketHistogramAggregation, View, ) MAX_BOUND_VALUE = 10000 def _generate_bounds(bound_count): bounds = [] for i in range(bound_count): bounds.append(i * MAX_BOUND_VALUE / bound_count) return bounds hist_view_10 = View( instrument_name="test_histogram_10_bound", aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(10)), ) hist_view_49 = View( instrument_name="test_histogram_49_bound", aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(49)), ) hist_view_50 = View( instrument_name="test_histogram_50_bound", aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(50)), ) hist_view_1000 = View( instrument_name="test_histogram_1000_bound", aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(1000)), ) reader = InMemoryMetricReader() provider = MeterProvider( metric_readers=[reader], views=[ hist_view_10, hist_view_49, hist_view_50, hist_view_1000, ], ) meter = provider.get_meter("sdk_meter_provider") hist = meter.create_histogram("test_histogram_default") hist10 = meter.create_histogram("test_histogram_10_bound") hist49 = meter.create_histogram("test_histogram_49_bound") hist50 = meter.create_histogram("test_histogram_50_bound") hist1000 = meter.create_histogram("test_histogram_1000_bound") def test_histogram_record(benchmark): values = itertools.cycle(_generate_bounds(10)) def benchmark_histogram_record(): hist.record(next(values)) benchmark(benchmark_histogram_record) def test_histogram_record_10(benchmark): values = itertools.cycle(_generate_bounds(10)) def benchmark_histogram_record_10(): hist10.record(next(values)) benchmark(benchmark_histogram_record_10) def test_histogram_record_49(benchmark): values = itertools.cycle(_generate_bounds(49)) def benchmark_histogram_record_49(): hist49.record(next(values)) benchmark(benchmark_histogram_record_49) def test_histogram_record_50(benchmark): values = itertools.cycle(_generate_bounds(50)) def benchmark_histogram_record_50(): hist50.record(next(values)) benchmark(benchmark_histogram_record_50) def test_histogram_record_1000(benchmark): values = itertools.cycle(_generate_bounds(1000)) def benchmark_histogram_record_1000(): hist1000.record(next(values)) benchmark(benchmark_histogram_record_1000) python-opentelemetry-1.39.1/opentelemetry-sdk/benchmarks/test_baggage.py000066400000000000000000000034301511654350100265770ustar00rootroot00000000000000# pylint: disable=redefined-outer-name, invalid-name import pytest from opentelemetry import trace from opentelemetry.baggage import ( clear, get_all, get_baggage, remove_baggage, set_baggage, ) tracer = trace.get_tracer(__name__) @pytest.fixture(params=[10, 100, 1000, 10000]) def baggage_size(request): return request.param def set_baggage_operation(size=10): with tracer.start_span(name="root span"): ctx = get_all() for i in range(size): ctx = set_baggage(f"foo{i}", f"bar{i}", context=ctx) return ctx def test_set_baggage(benchmark, baggage_size): ctx = 
benchmark(set_baggage_operation, baggage_size) result = get_all(ctx) assert len(result) == baggage_size def test_get_baggage(benchmark, baggage_size): ctx = set_baggage_operation(baggage_size) def get_baggage_operation(): return [get_baggage(f"foo{i}", ctx) for i in range(baggage_size)] result = benchmark(get_baggage_operation) assert result == [f"bar{i}" for i in range(baggage_size)] def test_remove_baggage(benchmark, baggage_size): ctx = set_baggage_operation(baggage_size) def remove_operation(): tmp_ctx = ctx for i in range(baggage_size): tmp_ctx = remove_baggage(f"foo{i}", tmp_ctx) return tmp_ctx cleared_context = benchmark(remove_operation) result = get_all(cleared_context) # After removing all baggage items, it should be empty. assert len(result) == 0 def test_clear_baggage(benchmark, baggage_size): ctx = set_baggage_operation(baggage_size) def clear_operation(): return clear(ctx) cleared_context = benchmark(clear_operation) result = get_all(cleared_context) # After clearing the baggage should be empty. assert len(result) == 0 python-opentelemetry-1.39.1/opentelemetry-sdk/benchmarks/trace/000077500000000000000000000000001511654350100247075ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/benchmarks/trace/test_benchmark_trace.py000066400000000000000000000032221511654350100314270ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
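# --- illustrative sketch (not part of the repository sources) ----------------
# A brief, hedged example of the immutable-context pattern the baggage
# benchmarks above rely on: set_baggage never mutates a Context, it returns a
# new one, so the updated context must be threaded through each call. The
# asserts assume an empty ambient context.
from opentelemetry.baggage import get_all, get_baggage, set_baggage

ctx = set_baggage("user.id", "42")  # derives a new context from the current one
ctx = set_baggage("tenant", "acme", context=ctx)

assert get_baggage("user.id", ctx) == "42"
assert dict(get_all(ctx)) == {"user.id": "42", "tenant": "acme"}
# ------------------------------------------------------------------------------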
from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider, sampling tracer = TracerProvider( sampler=sampling.DEFAULT_ON, resource=Resource( { "service.name": "A123456789", "service.version": "1.34567890", "service.instance.id": "123ab456-a123-12ab-12ab-12340a1abc12", } ), ).get_tracer("sdk_tracer_provider") def test_simple_start_span(benchmark): def benchmark_start_as_current_span(): span = tracer.start_span( "benchmarkedSpan", attributes={"long.attribute": -10000000001000000000}, ) span.add_event("benchmarkEvent") span.end() benchmark(benchmark_start_as_current_span) def test_simple_start_as_current_span(benchmark): def benchmark_start_as_current_span(): with tracer.start_as_current_span( "benchmarkedSpan", attributes={"long.attribute": -10000000001000000000}, ) as span: span.add_event("benchmarkEvent") benchmark(benchmark_start_as_current_span) python-opentelemetry-1.39.1/opentelemetry-sdk/pyproject.toml000066400000000000000000000057441511654350100244220ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-sdk" dynamic = ["version"] description = "OpenTelemetry Python SDK" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "opentelemetry-api == 1.39.1", "opentelemetry-semantic-conventions == 0.60b1", "typing-extensions >= 4.5.0", ] [project.entry-points.opentelemetry_environment_variables] sdk = "opentelemetry.sdk.environment_variables" [project.entry-points.opentelemetry_id_generator] random = "opentelemetry.sdk.trace.id_generator:RandomIdGenerator" [project.entry-points.opentelemetry_traces_sampler] always_on = "opentelemetry.sdk.trace.sampling:_AlwaysOn" always_off = "opentelemetry.sdk.trace.sampling:_AlwaysOff" parentbased_always_on = "opentelemetry.sdk.trace.sampling:_ParentBasedAlwaysOn" parentbased_always_off = "opentelemetry.sdk.trace.sampling:_ParentBasedAlwaysOff" traceidratio = "opentelemetry.sdk.trace.sampling:TraceIdRatioBased" parentbased_traceidratio = "opentelemetry.sdk.trace.sampling:ParentBasedTraceIdRatio" [project.entry-points.opentelemetry_logger_provider] sdk_logger_provider = "opentelemetry.sdk._logs:LoggerProvider" [project.entry-points.opentelemetry_logs_exporter] console = "opentelemetry.sdk._logs.export:ConsoleLogRecordExporter" [project.entry-points.opentelemetry_meter_provider] sdk_meter_provider = "opentelemetry.sdk.metrics:MeterProvider" [project.entry-points.opentelemetry_metrics_exporter] console = "opentelemetry.sdk.metrics.export:ConsoleMetricExporter" [project.entry-points.opentelemetry_tracer_provider] sdk_tracer_provider = "opentelemetry.sdk.trace:TracerProvider" [project.entry-points.opentelemetry_traces_exporter] console = "opentelemetry.sdk.trace.export:ConsoleSpanExporter" [project.entry-points.opentelemetry_resource_detector] otel = "opentelemetry.sdk.resources:OTELResourceDetector" process = "opentelemetry.sdk.resources:ProcessResourceDetector" os = 
"opentelemetry.sdk.resources:OsResourceDetector" host = "opentelemetry.sdk.resources:_HostResourceDetector" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/opentelemetry-sdk" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/sdk/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] python-opentelemetry-1.39.1/opentelemetry-sdk/src/000077500000000000000000000000001511654350100222635ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/000077500000000000000000000000001511654350100251575ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/000077500000000000000000000000001511654350100257405ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/__init__.pyi000066400000000000000000000012351511654350100302230ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The OpenTelemetry SDK package is an implementation of the OpenTelemetry API """ python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/000077500000000000000000000000001511654350100307465ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py000066400000000000000000000420401511654350100330570ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """ OpenTelemetry SDK Configurator for Easy Instrumentation with Distros """ from __future__ import annotations import logging import logging.config import os import warnings from abc import ABC, abstractmethod from os import environ from typing import Any, Callable, Mapping, Sequence, Type, Union from typing_extensions import Literal from opentelemetry._logs import set_logger_provider from opentelemetry.environment_variables import ( OTEL_LOGS_EXPORTER, OTEL_METRICS_EXPORTER, OTEL_PYTHON_ID_GENERATOR, OTEL_TRACES_EXPORTER, ) from opentelemetry.metrics import set_meter_provider from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler from opentelemetry.sdk._logs.export import ( BatchLogRecordProcessor, LogRecordExporter, ) from opentelemetry.sdk.environment_variables import ( _OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED, OTEL_EXPORTER_OTLP_LOGS_PROTOCOL, OTEL_EXPORTER_OTLP_METRICS_PROTOCOL, OTEL_EXPORTER_OTLP_PROTOCOL, OTEL_EXPORTER_OTLP_TRACES_PROTOCOL, OTEL_TRACES_SAMPLER, OTEL_TRACES_SAMPLER_ARG, ) from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( MetricExporter, MetricReader, PeriodicExportingMetricReader, ) from opentelemetry.sdk.resources import Attributes, Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter from opentelemetry.sdk.trace.id_generator import IdGenerator from opentelemetry.sdk.trace.sampling import Sampler from opentelemetry.semconv.resource import ResourceAttributes from opentelemetry.trace import set_tracer_provider from opentelemetry.util._importlib_metadata import entry_points _EXPORTER_OTLP = "otlp" _EXPORTER_OTLP_PROTO_GRPC = "otlp_proto_grpc" _EXPORTER_OTLP_PROTO_HTTP = "otlp_proto_http" _EXPORTER_BY_OTLP_PROTOCOL = { "grpc": _EXPORTER_OTLP_PROTO_GRPC, "http/protobuf": _EXPORTER_OTLP_PROTO_HTTP, } _EXPORTER_ENV_BY_SIGNAL_TYPE = { "traces": OTEL_TRACES_EXPORTER, "metrics": OTEL_METRICS_EXPORTER, "logs": OTEL_LOGS_EXPORTER, } _PROTOCOL_ENV_BY_SIGNAL_TYPE = { "traces": OTEL_EXPORTER_OTLP_TRACES_PROTOCOL, "metrics": OTEL_EXPORTER_OTLP_METRICS_PROTOCOL, "logs": OTEL_EXPORTER_OTLP_LOGS_PROTOCOL, } _RANDOM_ID_GENERATOR = "random" _DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR _OTEL_SAMPLER_ENTRY_POINT_GROUP = "opentelemetry_traces_sampler" _logger = logging.getLogger(__name__) ExporterArgsMap = Mapping[ Union[ Type[SpanExporter], Type[MetricExporter], Type[MetricReader], Type[LogRecordExporter], ], Mapping[str, Any], ] def _import_config_components( selected_components: Sequence[str], entry_point_name: str ) -> list[tuple[str, Type]]: component_implementations = [] for selected_component in selected_components: try: component_implementations.append( ( selected_component, next( iter( entry_points( group=entry_point_name, name=selected_component ) ) ).load(), ) ) except KeyError: raise RuntimeError( f"Requested entry point '{entry_point_name}' not found" ) except StopIteration: raise RuntimeError( f"Requested component '{selected_component}' not found in " f"entry point '{entry_point_name}'" ) return component_implementations def _get_sampler() -> str | None: return environ.get(OTEL_TRACES_SAMPLER, None) def _get_id_generator() -> str: return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR) def _get_exporter_entry_point( exporter_name: str, signal_type: Literal["traces", "metrics", "logs"] ): if exporter_name not in ( _EXPORTER_OTLP, _EXPORTER_OTLP_PROTO_GRPC, _EXPORTER_OTLP_PROTO_HTTP, ): return exporter_name # 
Checking env vars for OTLP protocol (grpc/http). otlp_protocol = environ.get( _PROTOCOL_ENV_BY_SIGNAL_TYPE[signal_type] ) or environ.get(OTEL_EXPORTER_OTLP_PROTOCOL) if not otlp_protocol: if exporter_name == _EXPORTER_OTLP: return _EXPORTER_OTLP_PROTO_GRPC return exporter_name otlp_protocol = otlp_protocol.strip() if exporter_name == _EXPORTER_OTLP: if otlp_protocol not in _EXPORTER_BY_OTLP_PROTOCOL: # Invalid value was set by the env var raise RuntimeError( f"Unsupported OTLP protocol '{otlp_protocol}' is configured" ) return _EXPORTER_BY_OTLP_PROTOCOL[otlp_protocol] # grpc/http already specified by exporter_name, only add a warning in case # of a conflict. exporter_name_by_env = _EXPORTER_BY_OTLP_PROTOCOL.get(otlp_protocol) if exporter_name_by_env and exporter_name != exporter_name_by_env: _logger.warning( "Conflicting values for %s OTLP exporter protocol, using '%s'", signal_type, exporter_name, ) return exporter_name def _get_exporter_names( signal_type: Literal["traces", "metrics", "logs"], ) -> list[str]: names = environ.get(_EXPORTER_ENV_BY_SIGNAL_TYPE.get(signal_type, "")) if not names or names.lower().strip() == "none": return [] return [ _get_exporter_entry_point(_exporter.strip(), signal_type) for _exporter in names.split(",") ] def _init_tracing( exporters: dict[str, Type[SpanExporter]], id_generator: IdGenerator | None = None, sampler: Sampler | None = None, resource: Resource | None = None, exporter_args_map: ExporterArgsMap | None = None, ): provider = TracerProvider( id_generator=id_generator, sampler=sampler, resource=resource, ) set_tracer_provider(provider) exporter_args_map = exporter_args_map or {} for _, exporter_class in exporters.items(): exporter_args = exporter_args_map.get(exporter_class, {}) provider.add_span_processor( BatchSpanProcessor(exporter_class(**exporter_args)) ) def _init_metrics( exporters_or_readers: dict[ str, Union[Type[MetricExporter], Type[MetricReader]] ], resource: Resource | None = None, exporter_args_map: ExporterArgsMap | None = None, ): metric_readers = [] exporter_args_map = exporter_args_map or {} for _, exporter_or_reader_class in exporters_or_readers.items(): exporter_args = exporter_args_map.get(exporter_or_reader_class, {}) if issubclass(exporter_or_reader_class, MetricReader): metric_readers.append(exporter_or_reader_class(**exporter_args)) else: metric_readers.append( PeriodicExportingMetricReader( exporter_or_reader_class(**exporter_args) ) ) provider = MeterProvider(resource=resource, metric_readers=metric_readers) set_meter_provider(provider) def _init_logging( exporters: dict[str, Type[LogRecordExporter]], resource: Resource | None = None, setup_logging_handler: bool = True, exporter_args_map: ExporterArgsMap | None = None, ): provider = LoggerProvider(resource=resource) set_logger_provider(provider) exporter_args_map = exporter_args_map or {} for _, exporter_class in exporters.items(): exporter_args = exporter_args_map.get(exporter_class, {}) provider.add_log_record_processor( BatchLogRecordProcessor(exporter_class(**exporter_args)) ) # silence warnings from internal users until we drop the deprecated Events API with warnings.catch_warnings(): warnings.simplefilter("ignore", category=DeprecationWarning) # pylint: disable=import-outside-toplevel from opentelemetry._events import ( # noqa: PLC0415 set_event_logger_provider, ) from opentelemetry.sdk._events import ( # noqa: PLC0415 EventLoggerProvider, ) event_logger_provider = EventLoggerProvider(logger_provider=provider) set_event_logger_provider(event_logger_provider) if 
setup_logging_handler: # Add OTel handler handler = LoggingHandler( level=logging.NOTSET, logger_provider=provider ) logging.getLogger().addHandler(handler) _overwrite_logging_config_fns(handler) def _overwrite_logging_config_fns(handler: LoggingHandler) -> None: root = logging.getLogger() def wrapper(config_fn: Callable) -> Callable: def overwritten_config_fn(*args, **kwargs): removed_handler = False # We don't want the OTLP handler to be modified or deleted by the logging config functions. # So we remove it and then add it back after the function call. if handler in root.handlers: removed_handler = True root.handlers.remove(handler) try: config_fn(*args, **kwargs) finally: # Ensure handler is added back if logging function throws exception. if removed_handler: root.addHandler(handler) return overwritten_config_fn logging.config.fileConfig = wrapper(logging.config.fileConfig) logging.config.dictConfig = wrapper(logging.config.dictConfig) logging.basicConfig = wrapper(logging.basicConfig) def _import_exporters( trace_exporter_names: Sequence[str], metric_exporter_names: Sequence[str], log_exporter_names: Sequence[str], ) -> tuple[ dict[str, Type[SpanExporter]], dict[str, Union[Type[MetricExporter], Type[MetricReader]]], dict[str, Type[LogRecordExporter]], ]: trace_exporters = {} metric_exporters = {} log_exporters = {} for ( exporter_name, exporter_impl, ) in _import_config_components( trace_exporter_names, "opentelemetry_traces_exporter" ): if issubclass(exporter_impl, SpanExporter): trace_exporters[exporter_name] = exporter_impl else: raise RuntimeError(f"{exporter_name} is not a trace exporter") for ( exporter_name, exporter_impl, ) in _import_config_components( metric_exporter_names, "opentelemetry_metrics_exporter" ): # The metric exporter components may be push MetricExporter or pull exporters which # subclass MetricReader directly if issubclass(exporter_impl, (MetricExporter, MetricReader)): metric_exporters[exporter_name] = exporter_impl else: raise RuntimeError(f"{exporter_name} is not a metric exporter") for ( exporter_name, exporter_impl, ) in _import_config_components( log_exporter_names, "opentelemetry_logs_exporter" ): if issubclass(exporter_impl, LogRecordExporter): log_exporters[exporter_name] = exporter_impl else: raise RuntimeError(f"{exporter_name} is not a log exporter") return trace_exporters, metric_exporters, log_exporters def _import_sampler_factory( sampler_name: str, ) -> Callable[[float | str | None], Sampler]: _, sampler_impl = _import_config_components( [sampler_name.strip()], _OTEL_SAMPLER_ENTRY_POINT_GROUP )[0] return sampler_impl def _import_sampler(sampler_name: str | None) -> Sampler | None: if not sampler_name: return None try: sampler_factory = _import_sampler_factory(sampler_name) arg = None if sampler_name in ("traceidratio", "parentbased_traceidratio"): try: rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG, "")) except (ValueError, TypeError): _logger.warning( "Could not convert TRACES_SAMPLER_ARG to float. Using default value 1.0." ) rate = 1.0 arg = rate else: arg = os.getenv(OTEL_TRACES_SAMPLER_ARG) sampler = sampler_factory(arg) if not isinstance(sampler, Sampler): message = f"Sampler factory, {sampler_factory}, produced output, {sampler}, which is not a Sampler." _logger.warning(message) raise ValueError(message) return sampler except Exception as exc: # pylint: disable=broad-exception-caught _logger.warning( "Using default sampler. 
Failed to initialize sampler, %s: %s", sampler_name, exc, ) return None def _import_id_generator(id_generator_name: str) -> IdGenerator: id_generator_name, id_generator_impl = _import_config_components( [id_generator_name.strip()], "opentelemetry_id_generator" )[0] if issubclass(id_generator_impl, IdGenerator): return id_generator_impl() raise RuntimeError(f"{id_generator_name} is not an IdGenerator") def _initialize_components( auto_instrumentation_version: str | None = None, trace_exporter_names: list[str] | None = None, metric_exporter_names: list[str] | None = None, log_exporter_names: list[str] | None = None, sampler: Sampler | None = None, resource_attributes: Attributes | None = None, id_generator: IdGenerator | None = None, setup_logging_handler: bool | None = None, exporter_args_map: ExporterArgsMap | None = None, ): if trace_exporter_names is None: trace_exporter_names = [] if metric_exporter_names is None: metric_exporter_names = [] if log_exporter_names is None: log_exporter_names = [] span_exporters, metric_exporters, log_exporters = _import_exporters( trace_exporter_names + _get_exporter_names("traces"), metric_exporter_names + _get_exporter_names("metrics"), log_exporter_names + _get_exporter_names("logs"), ) if sampler is None: sampler_name = _get_sampler() sampler = _import_sampler(sampler_name) if id_generator is None: id_generator_name = _get_id_generator() id_generator = _import_id_generator(id_generator_name) if resource_attributes is None: resource_attributes = {} # populate version if using auto-instrumentation if auto_instrumentation_version: resource_attributes[ResourceAttributes.TELEMETRY_AUTO_VERSION] = ( # type: ignore[reportIndexIssue] auto_instrumentation_version ) # if env var OTEL_RESOURCE_ATTRIBUTES is given, it will read the service_name # from the env variable else defaults to "unknown_service" resource = Resource.create(resource_attributes) _init_tracing( exporters=span_exporters, id_generator=id_generator, sampler=sampler, resource=resource, exporter_args_map=exporter_args_map, ) _init_metrics( metric_exporters, resource, exporter_args_map=exporter_args_map ) if setup_logging_handler is None: setup_logging_handler = ( os.getenv( _OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED, "false" ) .strip() .lower() == "true" ) _init_logging( log_exporters, resource, setup_logging_handler, exporter_args_map=exporter_args_map, ) class _BaseConfigurator(ABC): """An ABC for configurators Configurators are used to configure SDKs (i.e. TracerProvider, MeterProvider, Processors...) to reduce the amount of manual configuration required. """ _instance = None _is_instrumented = False def __new__(cls, *args, **kwargs): if cls._instance is None: cls._instance = object.__new__(cls, *args, **kwargs) return cls._instance @abstractmethod def _configure(self, **kwargs): """Configure the SDK""" def configure(self, **kwargs): """Configure the SDK""" self._configure(**kwargs) class _OTelSDKConfigurator(_BaseConfigurator): """A basic Configurator by OTel Python for initializing OTel SDK components Initializes several crucial OTel SDK components (i.e. TracerProvider, MeterProvider, Processors...) according to a default implementation. Other Configurators can subclass and slightly alter this initialization. NOTE: This class should not be instantiated nor should it become an entry point on the `opentelemetry-sdk` package. Instead, distros should subclass this Configurator and enhance it as needed. 
""" def _configure(self, **kwargs): _initialize_components(**kwargs) python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_events/000077500000000000000000000000001511654350100274035ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_events/__init__.py000066400000000000000000000070041511654350100315150ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from time import time_ns from typing import Optional from typing_extensions import deprecated from opentelemetry import trace from opentelemetry._events import Event from opentelemetry._events import EventLogger as APIEventLogger from opentelemetry._events import EventLoggerProvider as APIEventLoggerProvider from opentelemetry._logs import ( LogRecord, NoOpLogger, SeverityNumber, get_logger_provider, ) from opentelemetry.sdk._logs import Logger, LoggerProvider from opentelemetry.util.types import _ExtendedAttributes _logger = logging.getLogger(__name__) @deprecated( "You should use `Logger` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." ) class EventLogger(APIEventLogger): def __init__( self, logger_provider: LoggerProvider, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ): super().__init__( name=name, version=version, schema_url=schema_url, attributes=attributes, ) self._logger: Logger = logger_provider.get_logger( name, version, schema_url, attributes ) def emit(self, event: Event) -> None: if isinstance(self._logger, NoOpLogger): # Do nothing if SDK is disabled return span_context = trace.get_current_span().get_span_context() log_record = LogRecord( timestamp=event.timestamp or time_ns(), observed_timestamp=None, trace_id=event.trace_id or span_context.trace_id, span_id=event.span_id or span_context.span_id, trace_flags=event.trace_flags or span_context.trace_flags, severity_text=None, severity_number=event.severity_number or SeverityNumber.INFO, body=event.body, attributes=event.attributes, ) self._logger.emit(log_record) @deprecated( "You should use `LoggerProvider` instead. " "Deprecated since version 1.39.0 and will be removed in a future release." 
) class EventLoggerProvider(APIEventLoggerProvider): def __init__(self, logger_provider: Optional[LoggerProvider] = None): self._logger_provider = logger_provider or get_logger_provider() def get_event_logger( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ) -> EventLogger: if not name: _logger.warning("EventLogger created with invalid name: %s", name) return EventLogger( self._logger_provider, name, version, schema_url, attributes ) def shutdown(self): self._logger_provider.shutdown() def force_flush(self, timeout_millis: int = 30000) -> bool: return self._logger_provider.force_flush(timeout_millis) python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_logs/000077500000000000000000000000001511654350100270435ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_logs/__init__.py000066400000000000000000000021541511654350100311560ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.sdk._logs._internal import ( LogDroppedAttributesWarning, Logger, LoggerProvider, LoggingHandler, LogLimits, LogRecordDroppedAttributesWarning, LogRecordLimits, LogRecordProcessor, ReadableLogRecord, ReadWriteLogRecord, ) __all__ = [ "Logger", "LoggerProvider", "LoggingHandler", "LogLimits", "LogRecordLimits", "LogRecordProcessor", "LogDroppedAttributesWarning", "LogRecordDroppedAttributesWarning", "ReadableLogRecord", "ReadWriteLogRecord", ] python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/000077500000000000000000000000001511654350100310165ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py000066400000000000000000000712651511654350100331400ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
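# A minimal end-to-end sketch of the pipeline defined in this module (the
# processor and exporter come from the sibling export module; values are
# illustrative):
#
#     from opentelemetry.sdk._logs import LoggerProvider
#     from opentelemetry.sdk._logs.export import (
#         ConsoleLogRecordExporter,
#         SimpleLogRecordProcessor,
#     )
#
#     provider = LoggerProvider()
#     provider.add_log_record_processor(
#         SimpleLogRecordProcessor(ConsoleLogRecordExporter())
#     )
#     provider.get_logger("example").emit(body="hello world")
#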
from __future__ import annotations import abc import atexit import base64 import concurrent.futures import json import logging import threading import traceback import warnings from dataclasses import dataclass, field from os import environ from threading import Lock from time import time_ns from typing import Any, Callable, Tuple, Union, cast, overload # noqa from typing_extensions import deprecated from opentelemetry._logs import Logger as APILogger from opentelemetry._logs import LoggerProvider as APILoggerProvider from opentelemetry._logs import ( LogRecord, NoOpLogger, SeverityNumber, get_logger, get_logger_provider, ) from opentelemetry.attributes import _VALID_ANY_VALUE_TYPES, BoundedAttributes from opentelemetry.context import get_current from opentelemetry.context.context import Context from opentelemetry.sdk.environment_variables import ( OTEL_ATTRIBUTE_COUNT_LIMIT, OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, OTEL_SDK_DISABLED, ) from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.util import ns_to_iso_str from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.semconv._incubating.attributes import code_attributes from opentelemetry.semconv.attributes import exception_attributes from opentelemetry.trace import ( format_span_id, format_trace_id, ) from opentelemetry.util.types import AnyValue, _ExtendedAttributes _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT = 128 _ENV_VALUE_UNSET = "" class BytesEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, bytes): return base64.b64encode(o).decode() return super().default(o) class LogRecordDroppedAttributesWarning(UserWarning): """Custom warning to indicate dropped log attributes due to limits. This class is used to filter and handle these specific warnings separately from other warnings, ensuring that they are only shown once without interfering with default user warnings. """ warnings.simplefilter("once", LogRecordDroppedAttributesWarning) @deprecated( "Use LogRecordDroppedAttributesWarning. Since logs are not stable yet this WILL be removed in future releases." ) class LogDroppedAttributesWarning(LogRecordDroppedAttributesWarning): pass class LogRecordLimits: """This class is based on a SpanLimits class in the Tracing module. This class represents the limits that should be enforced on recorded data such as events, links, attributes etc. This class does not enforce any limits itself. It only provides a way to read limits from env, default values and from user provided arguments. All limit arguments must be either a non-negative integer or ``None``. - All limit arguments are optional. - If a limit argument is not set, the class will try to read its value from the corresponding environment variable. - If the environment variable is not set, the default value, if any, will be used. Limit precedence: - If a model specific limit is set, it will be used. - Else if the corresponding global limit is set, it will be used. - Else if the model specific limit has a default value, the default value will be used. - Else if the global limit has a default value, the default value will be used. Args: max_attributes: Maximum number of attributes that can be added to a span, event, and link. Environment variable: ``OTEL_ATTRIBUTE_COUNT_LIMIT`` Default: {_DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT} max_attribute_length: Maximum length an attribute value can have. Values longer than the specified length will be truncated. 
""" def __init__( self, max_attributes: int | None = None, max_attribute_length: int | None = None, ): # attribute count global_max_attributes = self._from_env_if_absent( max_attributes, OTEL_ATTRIBUTE_COUNT_LIMIT ) self.max_attributes = ( global_max_attributes if global_max_attributes is not None else _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT ) # attribute length self.max_attribute_length = self._from_env_if_absent( max_attribute_length, OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, ) def __repr__(self): return f"{type(self).__name__}(max_attributes={self.max_attributes}, max_attribute_length={self.max_attribute_length})" @classmethod def _from_env_if_absent( cls, value: int | None, env_var: str, default: int | None = None ) -> int | None: err_msg = "{} must be a non-negative integer but got {}" # if no value is provided for the limit, try to load it from env if value is None: # return default value if env var is not set if env_var not in environ: return default str_value = environ.get(env_var, "").strip().lower() if str_value == _ENV_VALUE_UNSET: return None try: value = int(str_value) except ValueError: raise ValueError(err_msg.format(env_var, str_value)) if value < 0: raise ValueError(err_msg.format(env_var, value)) return value @deprecated( "Use LogRecordLimits. Since logs are not stable yet this WILL be removed in future releases." ) class LogLimits(LogRecordLimits): pass @dataclass(frozen=True) class ReadableLogRecord: """Readable LogRecord should be kept exactly in-sync with ReadWriteLogRecord, only difference is the frozen=True param.""" log_record: LogRecord resource: Resource instrumentation_scope: InstrumentationScope | None = None limits: LogRecordLimits | None = None @property def dropped_attributes(self) -> int: if isinstance(self.log_record.attributes, BoundedAttributes): return self.log_record.attributes.dropped return 0 def to_json(self, indent: int | None = 4) -> str: return json.dumps( { "body": self.log_record.body, "severity_number": self.log_record.severity_number.value if self.log_record.severity_number is not None else None, "severity_text": self.log_record.severity_text, "attributes": ( dict(self.log_record.attributes) if bool(self.log_record.attributes) else None ), "dropped_attributes": self.dropped_attributes, "timestamp": ns_to_iso_str(self.log_record.timestamp) if self.log_record.timestamp is not None else None, "observed_timestamp": ns_to_iso_str( self.log_record.observed_timestamp ), "trace_id": ( f"0x{format_trace_id(self.log_record.trace_id)}" if self.log_record.trace_id is not None else "" ), "span_id": ( f"0x{format_span_id(self.log_record.span_id)}" if self.log_record.span_id is not None else "" ), "trace_flags": self.log_record.trace_flags, "resource": json.loads(self.resource.to_json()), "event_name": self.log_record.event_name if self.log_record.event_name else "", }, indent=indent, cls=BytesEncoder, ) @dataclass class ReadWriteLogRecord: """A ReadWriteLogRecord instance represents an event being logged. ReadWriteLogRecord instances are created and emitted via `Logger` every time something is logged. They contain all the information pertinent to the event being logged. 
""" log_record: LogRecord resource: Resource | None = Resource.create({}) instrumentation_scope: InstrumentationScope | None = None limits: LogRecordLimits = field(default_factory=LogRecordLimits) def __post_init__(self): self.log_record.attributes = BoundedAttributes( maxlen=self.limits.max_attributes, attributes=self.log_record.attributes if self.log_record.attributes else None, immutable=False, max_value_len=self.limits.max_attribute_length, extended_attributes=True, ) if self.dropped_attributes > 0: warnings.warn( "Log record attributes were dropped due to limits", LogRecordDroppedAttributesWarning, stacklevel=2, ) def __eq__(self, other: object) -> bool: if not isinstance(other, ReadWriteLogRecord): return NotImplemented return self.__dict__ == other.__dict__ @property def dropped_attributes(self) -> int: if isinstance(self.log_record.attributes, BoundedAttributes): return self.log_record.attributes.dropped return 0 @classmethod def _from_api_log_record( cls, *, record: LogRecord, resource: Resource, instrumentation_scope: InstrumentationScope | None = None, ) -> ReadWriteLogRecord: return cls( log_record=record, resource=resource, instrumentation_scope=instrumentation_scope, ) class LogRecordProcessor(abc.ABC): """Interface to hook the log record emitting action. Log processors can be registered directly using :func:`LoggerProvider.add_log_record_processor` and they are invoked in the same order as they were registered. """ @abc.abstractmethod def on_emit(self, log_record: ReadWriteLogRecord): """Emits the `ReadWriteLogRecord`""" @abc.abstractmethod def shutdown(self): """Called when a :class:`opentelemetry.sdk._logs.Logger` is shutdown""" @abc.abstractmethod def force_flush(self, timeout_millis: int = 30000) -> bool: """Export all the received logs to the configured Exporter that have not yet been exported. Args: timeout_millis: The maximum amount of time to wait for logs to be exported. Returns: False if the timeout is exceeded, True otherwise. """ # Temporary fix until https://github.com/PyCQA/pylint/issues/4098 is resolved # pylint:disable=no-member class SynchronousMultiLogRecordProcessor(LogRecordProcessor): """Implementation of class:`LogRecordProcessor` that forwards all received events to a list of log processors sequentially. The underlying log processors are called in sequential order as they were added. """ def __init__(self): # use a tuple to avoid race conditions when adding a new log and # iterating through it on "emit". self._log_record_processors = () # type: Tuple[LogRecordProcessor, ...] self._lock = threading.Lock() def add_log_record_processor( self, log_record_processor: LogRecordProcessor ) -> None: """Adds a Logprocessor to the list of log processors handled by this instance""" with self._lock: self._log_record_processors += (log_record_processor,) def on_emit(self, log_record: ReadWriteLogRecord) -> None: for lp in self._log_record_processors: lp.on_emit(log_record) def shutdown(self) -> None: """Shutdown the log processors one by one""" for lp in self._log_record_processors: lp.shutdown() def force_flush(self, timeout_millis: int = 30000) -> bool: """Force flush the log processors one by one Args: timeout_millis: The maximum amount of time to wait for logs to be exported. If the first n log processors exceeded the timeout then remaining log processors will not be flushed. Returns: True if all the log processors flushes the logs within timeout, False otherwise. 
""" deadline_ns = time_ns() + timeout_millis * 1000000 for lp in self._log_record_processors: current_ts = time_ns() if current_ts >= deadline_ns: return False if not lp.force_flush((deadline_ns - current_ts) // 1000000): return False return True class ConcurrentMultiLogRecordProcessor(LogRecordProcessor): """Implementation of :class:`LogRecordProcessor` that forwards all received events to a list of log processors in parallel. Calls to the underlying log processors are forwarded in parallel by submitting them to a thread pool executor and waiting until each log processor finished its work. Args: max_workers: The number of threads managed by the thread pool executor and thus defining how many log processors can work in parallel. """ def __init__(self, max_workers: int = 2): # use a tuple to avoid race conditions when adding a new log and # iterating through it on "emit". self._log_record_processors = () # type: Tuple[LogRecordProcessor, ...] self._lock = threading.Lock() self._executor = concurrent.futures.ThreadPoolExecutor( max_workers=max_workers ) def add_log_record_processor( self, log_record_processor: LogRecordProcessor ): with self._lock: self._log_record_processors += (log_record_processor,) def _submit_and_wait( self, func: Callable[[LogRecordProcessor], Callable[..., None]], *args: Any, **kwargs: Any, ): futures = [] for lp in self._log_record_processors: future = self._executor.submit(func(lp), *args, **kwargs) futures.append(future) for future in futures: future.result() def on_emit(self, log_record: ReadWriteLogRecord): self._submit_and_wait(lambda lp: lp.on_emit, log_record) def shutdown(self): self._submit_and_wait(lambda lp: lp.shutdown) def force_flush(self, timeout_millis: int = 30000) -> bool: """Force flush the log processors in parallel. Args: timeout_millis: The maximum amount of time to wait for logs to be exported. Returns: True if all the log processors flushes the logs within timeout, False otherwise. """ futures = [] for lp in self._log_record_processors: future = self._executor.submit(lp.force_flush, timeout_millis) futures.append(future) done_futures, not_done_futures = concurrent.futures.wait( futures, timeout_millis / 1e3 ) if not_done_futures: return False for future in done_futures: if not future.result(): return False return True # skip natural LogRecord attributes # http://docs.python.org/library/logging.html#logrecord-attributes _RESERVED_ATTRS = frozenset( ( "asctime", "args", "created", "exc_info", "exc_text", "filename", "funcName", "getMessage", "message", "levelname", "levelno", "lineno", "module", "msecs", "msg", "name", "pathname", "process", "processName", "relativeCreated", "stack_info", "thread", "threadName", "taskName", ) ) class LoggingHandler(logging.Handler): """A handler class which writes logging records, in OTLP format, to a network destination or file. Supports signals from the `logging` module. https://docs.python.org/3/library/logging.html """ def __init__( self, level: int = logging.NOTSET, logger_provider: APILoggerProvider | None = None, ) -> None: super().__init__(level=level) self._logger_provider = logger_provider or get_logger_provider() @staticmethod def _get_attributes(record: logging.LogRecord) -> _ExtendedAttributes: attributes = { k: v for k, v in vars(record).items() if k not in _RESERVED_ATTRS } # Add standard code attributes for logs. 
attributes[code_attributes.CODE_FILE_PATH] = record.pathname attributes[code_attributes.CODE_FUNCTION_NAME] = record.funcName attributes[code_attributes.CODE_LINE_NUMBER] = record.lineno if record.exc_info: exctype, value, tb = record.exc_info if exctype is not None: attributes[exception_attributes.EXCEPTION_TYPE] = ( exctype.__name__ ) if value is not None and value.args: attributes[exception_attributes.EXCEPTION_MESSAGE] = str( value.args[0] ) if tb is not None: # https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#stacktrace-representation attributes[exception_attributes.EXCEPTION_STACKTRACE] = ( "".join(traceback.format_exception(*record.exc_info)) ) return attributes def _translate(self, record: logging.LogRecord) -> LogRecord: timestamp = int(record.created * 1e9) observed_timestamp = time_ns() attributes = self._get_attributes(record) severity_number = std_to_otel(record.levelno) if self.formatter: body = self.format(record) else: # `record.getMessage()` uses `record.msg` as a template to format # `record.args` into. There is a special case in `record.getMessage()` # where it will only attempt formatting if args are provided, # otherwise, it just stringifies `record.msg`. # # Since the OTLP body field has a type of 'any' and the logging module # is sometimes used in such a way that objects incorrectly end up # set as record.msg, in those cases we would like to bypass # `record.getMessage()` completely and set the body to the object # itself instead of its string representation. # For more background, see: https://github.com/open-telemetry/opentelemetry-python/pull/4216 if not record.args and not isinstance(record.msg, str): # if record.msg is not a value we can export, cast it to string if not isinstance(record.msg, _VALID_ANY_VALUE_TYPES): body = str(record.msg) else: body = record.msg else: body = record.getMessage() # related to https://github.com/open-telemetry/opentelemetry-python/issues/3548 # Severity Text = WARN as defined in https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#displaying-severity. level_name = ( "WARN" if record.levelname == "WARNING" else record.levelname ) return LogRecord( timestamp=timestamp, observed_timestamp=observed_timestamp, context=get_current() or None, severity_text=level_name, severity_number=severity_number, body=body, attributes=attributes, ) def emit(self, record: logging.LogRecord) -> None: """ Emit a record. Skip emitting if logger is NoOp. The record is translated to OTel format, and then sent across the pipeline. """ logger = get_logger(record.name, logger_provider=self._logger_provider) if not isinstance(logger, NoOpLogger): logger.emit(self._translate(record)) def flush(self) -> None: """ Flushes the logging output. Skip flushing if logging_provider has no force_flush method. """ if hasattr(self._logger_provider, "force_flush") and callable( self._logger_provider.force_flush # type: ignore[reportAttributeAccessIssue] ): # This is done in a separate thread to avoid a potential deadlock, for # details see https://github.com/open-telemetry/opentelemetry-python/pull/4636.
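# The thread is started but not joined, so this flush() returns immediately
# instead of blocking on the provider's force_flush.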
thread = threading.Thread(target=self._logger_provider.force_flush) # type: ignore[reportAttributeAccessIssue] thread.start() class Logger(APILogger): def __init__( self, resource: Resource, multi_log_record_processor: Union[ SynchronousMultiLogRecordProcessor, ConcurrentMultiLogRecordProcessor, ], instrumentation_scope: InstrumentationScope, ): super().__init__( instrumentation_scope.name, instrumentation_scope.version, instrumentation_scope.schema_url, instrumentation_scope.attributes, ) self._resource = resource self._multi_log_record_processor = multi_log_record_processor self._instrumentation_scope = instrumentation_scope @property def resource(self): return self._resource # pylint: disable=arguments-differ def emit( self, record: LogRecord | None = None, *, timestamp: int | None = None, observed_timestamp: int | None = None, context: Context | None = None, severity_number: SeverityNumber | None = None, severity_text: str | None = None, body: AnyValue | None = None, attributes: _ExtendedAttributes | None = None, event_name: str | None = None, ) -> None: """Emits the :class:`ReadWriteLogRecord` by setting instrumentation scope and forwarding to the processor. """ # If a record is provided, use it directly if record is not None: if not isinstance(record, ReadWriteLogRecord): # pylint:disable=protected-access writable_record = ReadWriteLogRecord._from_api_log_record( record=record, resource=self._resource, instrumentation_scope=self._instrumentation_scope, ) else: writable_record = record else: # Create a record from individual parameters log_record = LogRecord( timestamp=timestamp, observed_timestamp=observed_timestamp, context=context, severity_number=severity_number, severity_text=severity_text, body=body, attributes=attributes, event_name=event_name, ) # pylint:disable=protected-access writable_record = ReadWriteLogRecord._from_api_log_record( record=log_record, resource=self._resource, instrumentation_scope=self._instrumentation_scope, ) self._multi_log_record_processor.on_emit(writable_record) class LoggerProvider(APILoggerProvider): def __init__( self, resource: Resource | None = None, shutdown_on_exit: bool = True, multi_log_record_processor: SynchronousMultiLogRecordProcessor | ConcurrentMultiLogRecordProcessor | None = None, ): if resource is None: self._resource = Resource.create({}) else: self._resource = resource self._multi_log_record_processor = ( multi_log_record_processor or SynchronousMultiLogRecordProcessor() ) disabled = environ.get(OTEL_SDK_DISABLED, "") self._disabled = disabled.lower().strip() == "true" self._at_exit_handler = None if shutdown_on_exit: self._at_exit_handler = atexit.register(self.shutdown) self._logger_cache = {} self._logger_cache_lock = Lock() @property def resource(self): return self._resource def _get_logger_no_cache( self, name: str, version: str | None = None, schema_url: str | None = None, attributes: _ExtendedAttributes | None = None, ) -> Logger: return Logger( self._resource, self._multi_log_record_processor, InstrumentationScope( name, version, schema_url, attributes, ), ) def _get_logger_cached( self, name: str, version: str | None = None, schema_url: str | None = None, ) -> Logger: with self._logger_cache_lock: key = (name, version, schema_url) if key in self._logger_cache: return self._logger_cache[key] self._logger_cache[key] = self._get_logger_no_cache( name, version, schema_url ) return self._logger_cache[key] def get_logger( self, name: str, version: str | None = None, schema_url: str | None = None, attributes: _ExtendedAttributes 
| None = None, ) -> APILogger: if self._disabled: return NoOpLogger( name, version=version, schema_url=schema_url, attributes=attributes, ) if attributes is None: return self._get_logger_cached(name, version, schema_url) return self._get_logger_no_cache(name, version, schema_url, attributes) def add_log_record_processor( self, log_record_processor: LogRecordProcessor ): """Registers a new :class:`LogRecordProcessor` for this `LoggerProvider` instance. The log processors are invoked in the same order they are registered. """ self._multi_log_record_processor.add_log_record_processor( log_record_processor ) def shutdown(self): """Shuts down the log processors.""" self._multi_log_record_processor.shutdown() if self._at_exit_handler is not None: atexit.unregister(self._at_exit_handler) self._at_exit_handler = None def force_flush(self, timeout_millis: int = 30000) -> bool: """Force flush the log processors. Args: timeout_millis: The maximum amount of time to wait for logs to be exported. Returns: True if all the log processors flushes the logs within timeout, False otherwise. """ return self._multi_log_record_processor.force_flush(timeout_millis) _STD_TO_OTEL = { 10: SeverityNumber.DEBUG, 11: SeverityNumber.DEBUG2, 12: SeverityNumber.DEBUG3, 13: SeverityNumber.DEBUG4, 14: SeverityNumber.DEBUG4, 15: SeverityNumber.DEBUG4, 16: SeverityNumber.DEBUG4, 17: SeverityNumber.DEBUG4, 18: SeverityNumber.DEBUG4, 19: SeverityNumber.DEBUG4, 20: SeverityNumber.INFO, 21: SeverityNumber.INFO2, 22: SeverityNumber.INFO3, 23: SeverityNumber.INFO4, 24: SeverityNumber.INFO4, 25: SeverityNumber.INFO4, 26: SeverityNumber.INFO4, 27: SeverityNumber.INFO4, 28: SeverityNumber.INFO4, 29: SeverityNumber.INFO4, 30: SeverityNumber.WARN, 31: SeverityNumber.WARN2, 32: SeverityNumber.WARN3, 33: SeverityNumber.WARN4, 34: SeverityNumber.WARN4, 35: SeverityNumber.WARN4, 36: SeverityNumber.WARN4, 37: SeverityNumber.WARN4, 38: SeverityNumber.WARN4, 39: SeverityNumber.WARN4, 40: SeverityNumber.ERROR, 41: SeverityNumber.ERROR2, 42: SeverityNumber.ERROR3, 43: SeverityNumber.ERROR4, 44: SeverityNumber.ERROR4, 45: SeverityNumber.ERROR4, 46: SeverityNumber.ERROR4, 47: SeverityNumber.ERROR4, 48: SeverityNumber.ERROR4, 49: SeverityNumber.ERROR4, 50: SeverityNumber.FATAL, 51: SeverityNumber.FATAL2, 52: SeverityNumber.FATAL3, 53: SeverityNumber.FATAL4, } def std_to_otel(levelno: int) -> SeverityNumber: """ Map python log levelno as defined in https://docs.python.org/3/library/logging.html#logging-levels to OTel log severity number as defined here: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber """ if levelno < 10: return SeverityNumber.UNSPECIFIED if levelno > 53: return SeverityNumber.FATAL4 return _STD_TO_OTEL[levelno] python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/000077500000000000000000000000001511654350100323375ustar00rootroot00000000000000__init__.py000066400000000000000000000253001511654350100343710ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import abc import enum import logging import sys from os import environ, linesep from typing import IO, Callable, Optional, Sequence from typing_extensions import deprecated from opentelemetry.context import ( _SUPPRESS_INSTRUMENTATION_KEY, attach, detach, set_value, ) from opentelemetry.sdk._logs import ( LogRecordProcessor, ReadableLogRecord, ReadWriteLogRecord, ) from opentelemetry.sdk._shared_internal import BatchProcessor, DuplicateFilter from opentelemetry.sdk.environment_variables import ( OTEL_BLRP_EXPORT_TIMEOUT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, OTEL_BLRP_MAX_QUEUE_SIZE, OTEL_BLRP_SCHEDULE_DELAY, ) from opentelemetry.sdk.resources import Resource _DEFAULT_SCHEDULE_DELAY_MILLIS = 5000 _DEFAULT_MAX_EXPORT_BATCH_SIZE = 512 _DEFAULT_EXPORT_TIMEOUT_MILLIS = 30000 _DEFAULT_MAX_QUEUE_SIZE = 2048 _ENV_VAR_INT_VALUE_ERROR_MESSAGE = ( "Unable to parse value for %s as integer. Defaulting to %s." ) _logger = logging.getLogger(__name__) _logger.addFilter(DuplicateFilter()) class LogRecordExportResult(enum.Enum): SUCCESS = 0 FAILURE = 1 @deprecated( "Use LogRecordExportResult. Since logs are not stable yet this WILL be removed in future releases." ) class LogExportResult(enum.Enum): SUCCESS = 0 FAILURE = 1 class LogRecordExporter(abc.ABC): """Interface for exporting logs. Interface to be implemented by services that want to export logs received in their own format. To export data this MUST be registered to the :class`opentelemetry.sdk._logs.Logger` using a log processor. """ @abc.abstractmethod def export( self, batch: Sequence[ReadableLogRecord] ) -> LogRecordExportResult: """Exports a batch of logs. Args: batch: The list of `ReadableLogRecord` objects to be exported Returns: The result of the export """ @abc.abstractmethod def shutdown(self): """Shuts down the exporter. Called when the SDK is shut down. """ @deprecated( "Use LogRecordExporter. Since logs are not stable yet this WILL be removed in future releases." ) class LogExporter(LogRecordExporter): pass class ConsoleLogRecordExporter(LogRecordExporter): """Implementation of :class:`LogRecordExporter` that prints log records to the console. This class can be used for diagnostic purposes. It prints the exported log records to the console STDOUT. """ def __init__( self, out: IO = sys.stdout, formatter: Callable[ [ReadableLogRecord], str ] = lambda record: record.to_json() + linesep, ): self.out = out self.formatter = formatter def export(self, batch: Sequence[ReadableLogRecord]): for log_record in batch: self.out.write(self.formatter(log_record)) self.out.flush() return LogRecordExportResult.SUCCESS def shutdown(self): pass @deprecated( "Use ConsoleLogRecordExporter. Since logs are not stable yet this WILL be removed in future releases." ) class ConsoleLogExporter(ConsoleLogRecordExporter): pass class SimpleLogRecordProcessor(LogRecordProcessor): """This is an implementation of LogRecordProcessor which passes received logs directly to the configured LogRecordExporter, as soon as they are emitted. 
""" def __init__(self, exporter: LogRecordExporter): self._exporter = exporter self._shutdown = False def on_emit(self, log_record: ReadWriteLogRecord): if self._shutdown: _logger.warning("Processor is already shutdown, ignoring call") return token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)) try: # Convert ReadWriteLogRecord to ReadableLogRecord before exporting # Note: resource should not be None at this point as it's set during Logger.emit() resource = ( log_record.resource if log_record.resource is not None else Resource.create({}) ) readable_log_record = ReadableLogRecord( log_record=log_record.log_record, resource=resource, instrumentation_scope=log_record.instrumentation_scope, limits=log_record.limits, ) self._exporter.export((readable_log_record,)) except Exception: # pylint: disable=broad-exception-caught _logger.exception("Exception while exporting logs.") detach(token) def shutdown(self): self._shutdown = True self._exporter.shutdown() def force_flush(self, timeout_millis: int = 30000) -> bool: # pylint: disable=no-self-use return True class BatchLogRecordProcessor(LogRecordProcessor): """This is an implementation of LogRecordProcessor which creates batches of received logs and sends them to the configured LogRecordExporter. `BatchLogRecordProcessor` is configurable with the following environment variables which correspond to constructor parameters: - :envvar:`OTEL_BLRP_SCHEDULE_DELAY` - :envvar:`OTEL_BLRP_MAX_QUEUE_SIZE` - :envvar:`OTEL_BLRP_MAX_EXPORT_BATCH_SIZE` - :envvar:`OTEL_BLRP_EXPORT_TIMEOUT` All the logic for emitting logs, shutting down etc. resides in the BatchProcessor class. """ def __init__( self, exporter: LogRecordExporter, schedule_delay_millis: float | None = None, max_export_batch_size: int | None = None, export_timeout_millis: float | None = None, max_queue_size: int | None = None, ): if max_queue_size is None: max_queue_size = BatchLogRecordProcessor._default_max_queue_size() if schedule_delay_millis is None: schedule_delay_millis = ( BatchLogRecordProcessor._default_schedule_delay_millis() ) if max_export_batch_size is None: max_export_batch_size = ( BatchLogRecordProcessor._default_max_export_batch_size() ) # Not used. No way currently to pass timeout to export. 
if export_timeout_millis is None: export_timeout_millis = ( BatchLogRecordProcessor._default_export_timeout_millis() ) BatchLogRecordProcessor._validate_arguments( max_queue_size, schedule_delay_millis, max_export_batch_size ) # Initializes BatchProcessor self._batch_processor = BatchProcessor( exporter, schedule_delay_millis, max_export_batch_size, export_timeout_millis, max_queue_size, "Log", ) def on_emit(self, log_record: ReadWriteLogRecord) -> None: # Convert ReadWriteLogRecord to ReadableLogRecord before passing to BatchProcessor # Note: resource should not be None at this point as it's set during Logger.emit() resource = ( log_record.resource if log_record.resource is not None else Resource.create({}) ) readable_log_record = ReadableLogRecord( log_record=log_record.log_record, resource=resource, instrumentation_scope=log_record.instrumentation_scope, limits=log_record.limits, ) return self._batch_processor.emit(readable_log_record) def shutdown(self): return self._batch_processor.shutdown() def force_flush(self, timeout_millis: Optional[int] = None) -> bool: return self._batch_processor.force_flush(timeout_millis) @staticmethod def _default_max_queue_size(): try: return int( environ.get(OTEL_BLRP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE) ) except ValueError: _logger.exception( _ENV_VAR_INT_VALUE_ERROR_MESSAGE, OTEL_BLRP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE, ) return _DEFAULT_MAX_QUEUE_SIZE @staticmethod def _default_schedule_delay_millis(): try: return int( environ.get( OTEL_BLRP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS ) ) except ValueError: _logger.exception( _ENV_VAR_INT_VALUE_ERROR_MESSAGE, OTEL_BLRP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS, ) return _DEFAULT_SCHEDULE_DELAY_MILLIS @staticmethod def _default_max_export_batch_size(): try: return int( environ.get( OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, _DEFAULT_MAX_EXPORT_BATCH_SIZE, ) ) except ValueError: _logger.exception( _ENV_VAR_INT_VALUE_ERROR_MESSAGE, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, _DEFAULT_MAX_EXPORT_BATCH_SIZE, ) return _DEFAULT_MAX_EXPORT_BATCH_SIZE @staticmethod def _default_export_timeout_millis(): try: return int( environ.get( OTEL_BLRP_EXPORT_TIMEOUT, _DEFAULT_EXPORT_TIMEOUT_MILLIS ) ) except ValueError: _logger.exception( _ENV_VAR_INT_VALUE_ERROR_MESSAGE, OTEL_BLRP_EXPORT_TIMEOUT, _DEFAULT_EXPORT_TIMEOUT_MILLIS, ) return _DEFAULT_EXPORT_TIMEOUT_MILLIS @staticmethod def _validate_arguments( max_queue_size, schedule_delay_millis, max_export_batch_size ): if max_queue_size <= 0: raise ValueError("max_queue_size must be a positive integer.") if schedule_delay_millis <= 0: raise ValueError("schedule_delay_millis must be positive.") if max_export_batch_size <= 0: raise ValueError( "max_export_batch_size must be a positive integer." ) if max_export_batch_size > max_queue_size: raise ValueError( "max_export_batch_size must be less than or equal to max_queue_size." ) in_memory_log_exporter.py000066400000000000000000000037221511654350100374250ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import threading import typing from typing_extensions import deprecated from opentelemetry.sdk._logs import ReadableLogRecord from opentelemetry.sdk._logs.export import ( LogRecordExporter, LogRecordExportResult, ) class InMemoryLogRecordExporter(LogRecordExporter): """Implementation of :class:`.LogRecordExporter` that stores logs in memory. This class can be used for testing purposes. It stores the exported logs in a list in memory that can be retrieved using the :func:`.get_finished_logs` method. """ def __init__(self): self._logs = [] self._lock = threading.Lock() self._stopped = False def clear(self) -> None: with self._lock: self._logs.clear() def get_finished_logs(self) -> typing.Tuple[ReadableLogRecord, ...]: with self._lock: return tuple(self._logs) def export( self, batch: typing.Sequence[ReadableLogRecord] ) -> LogRecordExportResult: if self._stopped: return LogRecordExportResult.FAILURE with self._lock: self._logs.extend(batch) return LogRecordExportResult.SUCCESS def shutdown(self) -> None: self._stopped = True @deprecated( "Use InMemoryLogRecordExporter. Since logs are not stable yet this WILL be removed in future releases." ) class InMemoryLogExporter(InMemoryLogRecordExporter): pass python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/000077500000000000000000000000001511654350100303645ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_logs/export/__init__.py000066400000000000000000000025061511654350100325000ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.sdk._logs._internal.export import ( BatchLogRecordProcessor, ConsoleLogExporter, ConsoleLogRecordExporter, LogExporter, LogExportResult, LogRecordExporter, LogRecordExportResult, SimpleLogRecordProcessor, ) # The point module is not in the export directory to avoid a circular import. 
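# A minimal testing sketch using the in-memory exporter re-exported below
# (LoggerProvider comes from opentelemetry.sdk._logs; the other names are
# re-exported by this module):
#
#     exporter = InMemoryLogRecordExporter()
#     provider = LoggerProvider()
#     provider.add_log_record_processor(SimpleLogRecordProcessor(exporter))
#     provider.get_logger("test").emit(body="captured")
#     assert len(exporter.get_finished_logs()) == 1
#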
from opentelemetry.sdk._logs._internal.export.in_memory_log_exporter import ( InMemoryLogExporter, InMemoryLogRecordExporter, ) __all__ = [ "BatchLogRecordProcessor", "ConsoleLogExporter", "ConsoleLogRecordExporter", "LogExporter", "LogRecordExporter", "LogExportResult", "LogRecordExportResult", "SimpleLogRecordProcessor", "InMemoryLogExporter", "InMemoryLogRecordExporter", ] python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_shared_internal/000077500000000000000000000000001511654350100312415ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/_shared_internal/__init__.py000066400000000000000000000231421511654350100333540ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import collections import enum import inspect import logging import os import threading import time import weakref from abc import abstractmethod from typing import ( Generic, Optional, Protocol, TypeVar, ) from opentelemetry.context import ( _SUPPRESS_INSTRUMENTATION_KEY, attach, detach, set_value, ) from opentelemetry.util._once import Once class DuplicateFilter(logging.Filter): """Filter that can be applied to internal loggers. Currently applied to loggers on the export logs path that could otherwise cause endless logging of errors or a recursion depth exceeded issue in cases where logging itself results in an exception.""" def filter(self, record): current_log = ( record.module, record.levelno, record.msg, # We need to pick a time longer than the OTLP LogExporter timeout # which defaults to 10 seconds, but not pick something so long that # it filters out useful logs. time.time() // 20, ) if current_log != getattr(self, "last_log", None): self.last_log = current_log # pylint: disable=attribute-defined-outside-init return True # False means python's `logging` module will no longer process this log. return False class BatchExportStrategy(enum.Enum): EXPORT_ALL = 0 EXPORT_WHILE_BATCH_EXCEEDS_THRESHOLD = 1 EXPORT_AT_LEAST_ONE_BATCH = 2 Telemetry = TypeVar("Telemetry") class Exporter(Protocol[Telemetry]): @abstractmethod def export(self, batch: list[Telemetry], /): raise NotImplementedError @abstractmethod def shutdown(self): raise NotImplementedError class BatchProcessor(Generic[Telemetry]): """This class can be used with exporters that implement the above Exporter interface to buffer and send telemetry in batches through the exporter.""" def __init__( self, exporter: Exporter[Telemetry], schedule_delay_millis: float, max_export_batch_size: int, export_timeout_millis: float, max_queue_size: int, exporting: str, ): self._bsp_reset_once = Once() self._exporter = exporter self._max_queue_size = max_queue_size self._schedule_delay_millis = schedule_delay_millis self._schedule_delay = schedule_delay_millis / 1e3 self._max_export_batch_size = max_export_batch_size # Not used. No way currently to pass timeout to export.
# TODO(https://github.com/open-telemetry/opentelemetry-python/issues/4555): figure out what this should do. self._export_timeout_millis = export_timeout_millis # Deque is thread safe. self._queue = collections.deque([], max_queue_size) self._worker_thread = threading.Thread( name=f"OtelBatch{exporting}RecordProcessor", target=self.worker, daemon=True, ) self._logger = logging.getLogger(__name__) self._logger.addFilter(DuplicateFilter()) self._exporting = exporting self._shutdown = False self._shutdown_timeout_exceeded = False self._export_lock = threading.Lock() self._worker_awaken = threading.Event() self._worker_thread.start() if hasattr(os, "register_at_fork"): weak_reinit = weakref.WeakMethod(self._at_fork_reinit) os.register_at_fork(after_in_child=lambda: weak_reinit()()) # pyright: ignore[reportOptionalCall] pylint: disable=unnecessary-lambda self._pid = os.getpid() def _should_export_batch( self, batch_strategy: BatchExportStrategy, num_iterations: int ) -> bool: if not self._queue or self._shutdown_timeout_exceeded: return False # Always continue to export while queue length exceeds max batch size. if len(self._queue) >= self._max_export_batch_size: return True if batch_strategy is BatchExportStrategy.EXPORT_ALL: return True if batch_strategy is BatchExportStrategy.EXPORT_AT_LEAST_ONE_BATCH: return num_iterations == 0 return False def _at_fork_reinit(self): self._export_lock = threading.Lock() self._worker_awaken = threading.Event() self._queue.clear() self._worker_thread = threading.Thread( name=f"OtelBatch{self._exporting}RecordProcessor", target=self.worker, daemon=True, ) self._worker_thread.start() self._pid = os.getpid() def worker(self): while not self._shutdown: # Lots of strategies in the spec for setting next timeout. # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#batching-processor. # Shutdown will interrupt this sleep. Emit will interrupt this sleep only if the queue is bigger than the threshold. sleep_interrupted = self._worker_awaken.wait(self._schedule_delay) if self._shutdown: break self._export( BatchExportStrategy.EXPORT_WHILE_BATCH_EXCEEDS_THRESHOLD if sleep_interrupted else BatchExportStrategy.EXPORT_AT_LEAST_ONE_BATCH ) self._worker_awaken.clear() self._export(BatchExportStrategy.EXPORT_ALL) def _export(self, batch_strategy: BatchExportStrategy) -> None: with self._export_lock: iteration = 0 # We could see concurrent export calls from worker and force_flush. We call _should_export_batch # once the lock is obtained to see if we still need to make the requested export. while self._should_export_batch(batch_strategy, iteration): iteration += 1 token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)) try: self._exporter.export( [ # Oldest records are at the back, so pop from there. self._queue.pop() for _ in range( min( self._max_export_batch_size, len(self._queue), ) ) ] ) except Exception: # pylint: disable=broad-exception-caught self._logger.exception( "Exception while exporting %s.", self._exporting ) detach(token) # Do not add any logging.log statements to this function, they can end up being routed back to this `emit` function, # resulting in endless recursive calls that crash the program. # See https://github.com/open-telemetry/opentelemetry-python/issues/4261 def emit(self, data: Telemetry) -> None: if self._shutdown: return if self._pid != os.getpid(): self._bsp_reset_once.do_once(self._at_fork_reinit) # This will drop the oldest record from the right side if the queue is at _max_queue_size.
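# (collections.deque with a maxlen acts as a ring buffer: appendleft() on a
# full deque silently evicts the element at the opposite, right-hand end,
# so no explicit size check is needed before enqueueing here.)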
self._queue.appendleft(data) if len(self._queue) >= self._max_export_batch_size: self._worker_awaken.set() def shutdown(self, timeout_millis: int = 30000): if self._shutdown: return shutdown_should_end = time.time() + (timeout_millis / 1000) # Causes emit to reject telemetry and makes force_flush a no-op. self._shutdown = True # Interrupts sleep in the worker if it's sleeping. self._worker_awaken.set() self._worker_thread.join(timeout_millis / 1000) # Stops worker thread from calling export again if queue is still not empty. self._shutdown_timeout_exceeded = True # We want to shut down immediately only if we already waited the full `timeout_millis`. # Otherwise we pass the remaining timeout to the exporter. # Some exporters' shutdown methods support a timeout param. if ( "timeout_millis" in inspect.getfullargspec(self._exporter.shutdown).args ): remaining_millis = (shutdown_should_end - time.time()) * 1000 self._exporter.shutdown(timeout_millis=max(0, remaining_millis)) # type: ignore else: self._exporter.shutdown() # Worker thread **should** be finished at this point, because we called shutdown on the exporter, # and set `_shutdown_timeout_exceeded` to prevent further export calls. It's possible that a single export # call is ongoing and the thread isn't finished. In this case we will return instead of waiting on # the thread to finish. # TODO: Fix force flush so the timeout is used https://github.com/open-telemetry/opentelemetry-python/issues/4568. def force_flush(self, timeout_millis: Optional[int] = None) -> bool: if self._shutdown: return False # Blocking call to export. self._export(BatchExportStrategy.EXPORT_ALL) return True python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/environment_variables/000077500000000000000000000000001511654350100323345ustar00rootroot00000000000000__init__.py000066400000000000000000000733351511654350100343770ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/environment_variables# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. OTEL_SDK_DISABLED = "OTEL_SDK_DISABLED" """ .. envvar:: OTEL_SDK_DISABLED The :envvar:`OTEL_SDK_DISABLED` environment variable disables the SDK for all signals. Default: "false" """ OTEL_RESOURCE_ATTRIBUTES = "OTEL_RESOURCE_ATTRIBUTES" """ .. envvar:: OTEL_RESOURCE_ATTRIBUTES The :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource attributes to be passed to the SDK at process invocation. The attributes from :envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to `Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower* priority. Attributes should be in the format ``key1=value1,key2=value2``. Additional details are available in the specification. .. code-block:: console $ OTEL_RESOURCE_ATTRIBUTES="service.name=shoppingcart,will_be_overridden=foo" python - <<EOF import pprint from opentelemetry.sdk.resources import Resource pprint.pprint(Resource.create().attributes) EOF """ OTEL_EXPORTER_OTLP_TIMEOUT = "OTEL_EXPORTER_OTLP_TIMEOUT" """ ..
envvar:: OTEL_EXPORTER_OTLP_TIMEOUT The :envvar:`OTEL_EXPORTER_OTLP_TIMEOUT` is the maximum time the OTLP exporter will wait for each batch export. Default: 10 """ OTEL_EXPORTER_OTLP_ENDPOINT = "OTEL_EXPORTER_OTLP_ENDPOINT" """ .. envvar:: OTEL_EXPORTER_OTLP_ENDPOINT The :envvar:`OTEL_EXPORTER_OTLP_ENDPOINT` target to which the exporter is going to send spans or metrics. The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path. A scheme of https indicates a secure connection and takes precedence over the insecure configuration setting. Default: "http://localhost:4317" """ OTEL_EXPORTER_OTLP_INSECURE = "OTEL_EXPORTER_OTLP_INSECURE" """ .. envvar:: OTEL_EXPORTER_OTLP_INSECURE The :envvar:`OTEL_EXPORTER_OTLP_INSECURE` represents whether to enable client transport security for gRPC requests. A scheme of https takes precedence over this configuration setting. Default: False """ OTEL_EXPORTER_OTLP_TRACES_INSECURE = "OTEL_EXPORTER_OTLP_TRACES_INSECURE" """ .. envvar:: OTEL_EXPORTER_OTLP_TRACES_INSECURE The :envvar:`OTEL_EXPORTER_OTLP_TRACES_INSECURE` represents whether to enable client transport security for gRPC requests for spans. A scheme of https takes precedence over this configuration setting. Default: False """ OTEL_EXPORTER_OTLP_TRACES_ENDPOINT = "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT" """ .. envvar:: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT The :envvar:`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` target to which the span exporter is going to send spans. The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path. A scheme of https indicates a secure connection and takes precedence over this configuration setting. """ OTEL_EXPORTER_OTLP_METRICS_ENDPOINT = "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT" """ .. envvar:: OTEL_EXPORTER_OTLP_METRICS_ENDPOINT The :envvar:`OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` target to which the metrics exporter is going to send metrics. The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path. A scheme of https indicates a secure connection and takes precedence over this configuration setting. """ OTEL_EXPORTER_OTLP_LOGS_ENDPOINT = "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT" """ .. envvar:: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT The :envvar:`OTEL_EXPORTER_OTLP_LOGS_ENDPOINT` target to which the log exporter is going to send logs. The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path. A scheme of https indicates a secure connection and takes precedence over this configuration setting. """ _OTEL_PYTHON_EXPORTER_OTLP_GRPC_LOGS_CREDENTIAL_PROVIDER = ( "OTEL_PYTHON_EXPORTER_OTLP_GRPC_LOGS_CREDENTIAL_PROVIDER" ) """ .. envvar:: OTEL_PYTHON_EXPORTER_OTLP_GRPC_LOGS_CREDENTIAL_PROVIDER The :envvar:`OTEL_PYTHON_EXPORTER_OTLP_GRPC_LOGS_CREDENTIAL_PROVIDER` provides `grpc.ChannelCredentials` to the gRPC OTLP Log exporter. Entry point providers should implement the following: .. code-block:: python import grpc # Add a reference to this function under the `opentelemetry_otlp_credential_provider` entry point. def channel_credential_provider() -> grpc.ChannelCredentials: ... Note: This environment variable is experimental and subject to change. """ _OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER = ( "OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER" ) """ .. envvar:: OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER The :envvar:`OTEL_PYTHON_EXPORTER_OTLP_HTTP_LOGS_CREDENTIAL_PROVIDER` provides `requests.Session` for the HTTP OTLP Log exporter.
Entry point providers should implement the following: .. code-block:: python import requests # Add a reference to this function under the `opentelemetry_otlp_credential_provider` entry point. def request_session_provider() -> requests.Session: ... Note: This environment variable is experimental and subject to change. """ _OTEL_PYTHON_EXPORTER_OTLP_HTTP_CREDENTIAL_PROVIDER = ( "OTEL_PYTHON_EXPORTER_OTLP_HTTP_CREDENTIAL_PROVIDER" ) """ .. envvar:: OTEL_PYTHON_EXPORTER_OTLP_HTTP_CREDENTIAL_PROVIDER The :envvar:`OTEL_PYTHON_EXPORTER_OTLP_HTTP_CREDENTIAL_PROVIDER` provides `requests.Session` for all HTTP OTLP exporters. Entry point providers should implement the following: .. code-block:: python import requests # Add a reference to this function under the `opentelemetry_otlp_credential_provider` entry point. def request_session_provider() -> requests.Session: ... Note: This environment variable is experimental and subject to change. """ _OTEL_PYTHON_EXPORTER_OTLP_GRPC_CREDENTIAL_PROVIDER = ( "OTEL_PYTHON_EXPORTER_OTLP_GRPC_CREDENTIAL_PROVIDER" ) """ .. envvar:: OTEL_PYTHON_EXPORTER_OTLP_GRPC_CREDENTIAL_PROVIDER The :envvar:`OTEL_PYTHON_EXPORTER_OTLP_GRPC_CREDENTIAL_PROVIDER` provides `grpc.ChannelCredentials` for all GRPC OTLP exporters. Entry point providers should implement the following: .. code-block:: python import grpc # Add a reference to this function under the `opentelemetry_otlp_credential_provider` entry point. def channel_credential_provider() -> grpc.ChannelCredentials: ... Note: This environment variable is experimental and subject to change. """ _OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER = ( "OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER" ) """ .. envvar:: OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER The :envvar:`OTEL_PYTHON_EXPORTER_OTLP_HTTP_TRACES_CREDENTIAL_PROVIDER` provides `requests.Session` to the HTTP OTLP Span exporter. Entry point providers should implement the following: .. code-block:: python import requests # Add a reference to this function under the `opentelemetry_otlp_credential_provider` entry point. def request_session_provider() -> requests.Session: ... Note: This environment variable is experimental and subject to change. """ _OTEL_PYTHON_EXPORTER_OTLP_GRPC_TRACES_CREDENTIAL_PROVIDER = ( "OTEL_PYTHON_EXPORTER_OTLP_GRPC_TRACES_CREDENTIAL_PROVIDER" ) """ .. envvar:: OTEL_PYTHON_EXPORTER_OTLP_GRPC_TRACES_CREDENTIAL_PROVIDER The :envvar:`OTEL_PYTHON_EXPORTER_OTLP_GRPC_TRACES_CREDENTIAL_PROVIDER` provides `grpc.ChannelCredentials` to the GRPC OTLP Span exporter. Entry point providers should implement the following: .. code-block:: python import grpc # Add a reference to this function under the `opentelemetry_otlp_credential_provider` entry point. def channel_credential_provider() -> grpc.ChannelCredentials: ... Note: This environment variable is experimental and subject to change. """ _OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER = ( "OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER" ) """ .. envvar:: OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER The :envvar:`OTEL_PYTHON_EXPORTER_OTLP_HTTP_METRICS_CREDENTIAL_PROVIDER` provides `requests.Session` to the HTTP OTLP Metric exporter. Entry point providers should implement the following: .. code-block:: python import requests # Add a reference to this function under the `opentelemetry_otlp_credential_provider` entry point. def request_session_provider() -> requests.Session: ... Note: This environment variable is experimental and subject to change.
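For example, a provider package could expose a session pre-configured with client-side TLS material (illustrative only; the function body and paths below are not part of this SDK):

.. code-block:: python

    import requests

    def request_session_provider() -> requests.Session:
        session = requests.Session()
        # Client certificate and key for mTLS; paths are placeholders.
        session.cert = ("/certs/client.pem", "/certs/client.key")
        return session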
""" _OTEL_PYTHON_EXPORTER_OTLP_GRPC_METRICS_CREDENTIAL_PROVIDER = ( "OTEL_PYTHON_EXPORTER_OTLP_GRPC_METRICS_CREDENTIAL_PROVIDER" ) """ .. envvar:: OTEL_PYTHON_EXPORTER_OTLP_GRPC_METRICS_CREDENTIAL_PROVIDER The :envvar:`OTEL_PYTHON_EXPORTER_OTLP_GRPC_METRICS_CREDENTIAL_PROVIDER` provides `grpc.ChannelCredentials` to the GRPC OTLP Metric exporter. Entry point providers should implement the following: .. code-block:: python import grpc # Add a reference to this function under the `opentelemetry_otlp_credential_provider` entry point. def channel_credential_provider() -> grpc.ChannelCredentials: Note: This environment variable is experimental and subject to change. """ OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE = "OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE" """ .. envvar:: OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` stores the path to the certificate file for TLS credentials of gRPC client for traces. Should only be used for a secure connection for tracing. """ OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE = ( "OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE" ) """ .. envvar:: OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` stores the path to the certificate file for TLS credentials of gRPC client for metrics. Should only be used for a secure connection for exporting metrics. """ OTEL_EXPORTER_OTLP_CLIENT_KEY = "OTEL_EXPORTER_OTLP_CLIENT_KEY" """ .. envvar:: OTEL_EXPORTER_OTLP_CLIENT_KEY The :envvar:`OTEL_EXPORTER_OTLP_CLIENT_KEY` stores the path to the client private key to use in mTLS communication in PEM format. """ OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY = "OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY" """ .. envvar:: OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` stores the path to the client private key to use in mTLS communication in PEM format for traces. """ OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY" """ .. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` stores the path to the client private key to use in mTLS communication in PEM format for metrics. """ OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY" """ .. envvar:: OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY` stores the path to the client private key to use in mTLS communication in PEM format for logs. """ OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE = "OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE" """ .. envvar:: OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE The :envvar:`OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format. """ OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE = ( "OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE" ) """ .. envvar:: OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format for traces. """ OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE = ( "OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE" ) """ .. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format for metrics. 
""" OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE = ( "OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE" ) """ .. envvar:: OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format for logs. """ OTEL_EXPORTER_OTLP_TRACES_HEADERS = "OTEL_EXPORTER_OTLP_TRACES_HEADERS" """ .. envvar:: OTEL_EXPORTER_OTLP_TRACES_HEADERS The :envvar:`OTEL_EXPORTER_OTLP_TRACES_HEADERS` contains the key-value pairs to be used as headers for spans associated with gRPC or HTTP requests. """ OTEL_EXPORTER_OTLP_METRICS_HEADERS = "OTEL_EXPORTER_OTLP_METRICS_HEADERS" """ .. envvar:: OTEL_EXPORTER_OTLP_METRICS_HEADERS The :envvar:`OTEL_EXPORTER_OTLP_METRICS_HEADERS` contains the key-value pairs to be used as headers for metrics associated with gRPC or HTTP requests. """ OTEL_EXPORTER_OTLP_LOGS_HEADERS = "OTEL_EXPORTER_OTLP_LOGS_HEADERS" """ .. envvar:: OTEL_EXPORTER_OTLP_LOGS_HEADERS The :envvar:`OTEL_EXPORTER_OTLP_LOGS_HEADERS` contains the key-value pairs to be used as headers for logs associated with gRPC or HTTP requests. """ OTEL_EXPORTER_OTLP_TRACES_COMPRESSION = "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION" """ .. envvar:: OTEL_EXPORTER_OTLP_TRACES_COMPRESSION Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the span exporter. If both are present, this takes higher precedence. """ OTEL_EXPORTER_OTLP_METRICS_COMPRESSION = ( "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION" ) """ .. envvar:: OTEL_EXPORTER_OTLP_METRICS_COMPRESSION Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the metric exporter. If both are present, this takes higher precedence. """ OTEL_EXPORTER_OTLP_LOGS_COMPRESSION = "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION" """ .. envvar:: OTEL_EXPORTER_OTLP_LOGS_COMPRESSION Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the log exporter. If both are present, this takes higher precedence. """ OTEL_EXPORTER_OTLP_TRACES_TIMEOUT = "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT" """ .. envvar:: OTEL_EXPORTER_OTLP_TRACES_TIMEOUT The :envvar:`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` is the maximum time the OTLP exporter will wait for each batch export for spans. """ OTEL_EXPORTER_OTLP_METRICS_TIMEOUT = "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT" """ .. envvar:: OTEL_EXPORTER_OTLP_METRICS_TIMEOUT The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` is the maximum time the OTLP exporter will wait for each batch export for metrics. """ OTEL_EXPORTER_OTLP_METRICS_INSECURE = "OTEL_EXPORTER_OTLP_METRICS_INSECURE" """ .. envvar:: OTEL_EXPORTER_OTLP_METRICS_INSECURE The :envvar:`OTEL_EXPORTER_OTLP_METRICS_INSECURE` represents whether to enable client transport security for gRPC requests for metrics. A scheme of https takes precedence over the this configuration setting. Default: False """ OTEL_EXPORTER_OTLP_LOGS_INSECURE = "OTEL_EXPORTER_OTLP_LOGS_INSECURE" """ .. envvar:: OTEL_EXPORTER_OTLP_LOGS_INSECURE The :envvar:`OTEL_EXPORTER_OTLP_LOGS_INSECURE` represents whether to enable client transport security for gRPC requests for logs. A scheme of https takes precedence over the this configuration setting. Default: False """ OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE = "OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE" """ .. envvar:: OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE` stores the path to the certificate file for TLS credentials of gRPC client for logs. Should only be used for a secure connection for logs. 
""" OTEL_EXPORTER_OTLP_LOGS_TIMEOUT = "OTEL_EXPORTER_OTLP_LOGS_TIMEOUT" """ .. envvar:: OTEL_EXPORTER_OTLP_LOGS_TIMEOUT The :envvar:`OTEL_EXPORTER_OTLP_LOGS_TIMEOUT` is the maximum time the OTLP exporter will wait for each batch export for logs. """ OTEL_EXPORTER_JAEGER_CERTIFICATE = "OTEL_EXPORTER_JAEGER_CERTIFICATE" """ .. envvar:: OTEL_EXPORTER_JAEGER_CERTIFICATE The :envvar:`OTEL_EXPORTER_JAEGER_CERTIFICATE` stores the path to the certificate file for TLS credentials of gRPC client for Jaeger. Should only be used for a secure connection with Jaeger. """ OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES = ( "OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES" ) """ .. envvar:: OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES` is a boolean flag to determine whether to split a large span batch to admire the udp packet size limit. """ OTEL_SERVICE_NAME = "OTEL_SERVICE_NAME" """ .. envvar:: OTEL_SERVICE_NAME Convenience environment variable for setting the service name resource attribute. The following two environment variables have the same effect .. code-block:: console OTEL_SERVICE_NAME=my-python-service OTEL_RESOURCE_ATTRIBUTES=service.name=my-python-service If both are set, :envvar:`OTEL_SERVICE_NAME` takes precedence. """ _OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED = ( "OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED" ) """ .. envvar:: OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED The :envvar:`OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED` environment variable allows users to enable/disable the auto instrumentation for the python logging module. Default: False Note: Logs SDK and its related settings are experimental. """ OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE = ( "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE" ) """ .. envvar:: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable allows users to set the default aggregation temporality policy to use on the basis of instrument kind. The valid (case-insensitive) values are: ``CUMULATIVE``: Use ``CUMULATIVE`` aggregation temporality for all instrument kinds. ``DELTA``: Use ``DELTA`` aggregation temporality for ``Counter``, ``Asynchronous Counter`` and ``Histogram``. Use ``CUMULATIVE`` aggregation temporality for ``UpDownCounter`` and ``Asynchronous UpDownCounter``. ``LOWMEMORY``: Use ``DELTA`` aggregation temporality for ``Counter`` and ``Histogram``. Use ``CUMULATIVE`` aggregation temporality for ``UpDownCounter``, ``AsynchronousCounter`` and ``Asynchronous UpDownCounter``. """ OTEL_EXPORTER_JAEGER_GRPC_INSECURE = "OTEL_EXPORTER_JAEGER_GRPC_INSECURE" """ .. envvar:: OTEL_EXPORTER_JAEGER_GRPC_INSECURE The :envvar:`OTEL_EXPORTER_JAEGER_GRPC_INSECURE` is a boolean flag to True if collector has no encryption or authentication. """ OTEL_METRIC_EXPORT_INTERVAL = "OTEL_METRIC_EXPORT_INTERVAL" """ .. envvar:: OTEL_METRIC_EXPORT_INTERVAL The :envvar:`OTEL_METRIC_EXPORT_INTERVAL` is the time interval (in milliseconds) between the start of two export attempts. """ OTEL_METRIC_EXPORT_TIMEOUT = "OTEL_METRIC_EXPORT_TIMEOUT" """ .. envvar:: OTEL_METRIC_EXPORT_TIMEOUT The :envvar:`OTEL_METRIC_EXPORT_TIMEOUT` is the maximum allowed time (in milliseconds) to export data. """ OTEL_METRICS_EXEMPLAR_FILTER = "OTEL_METRICS_EXEMPLAR_FILTER" """ .. envvar:: OTEL_METRICS_EXEMPLAR_FILTER The :envvar:`OTEL_METRICS_EXEMPLAR_FILTER` is the filter for which measurements can become Exemplars. 
""" OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION = ( "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION" ) """ .. envvar:: OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION The :envvar:`OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` is the default aggregation to use for histogram instruments. """ OTEL_EXPERIMENTAL_RESOURCE_DETECTORS = "OTEL_EXPERIMENTAL_RESOURCE_DETECTORS" """ .. envvar:: OTEL_EXPERIMENTAL_RESOURCE_DETECTORS The :envvar:`OTEL_EXPERIMENTAL_RESOURCE_DETECTORS` is a comma-separated string of names of resource detectors. These names must be the same as the names of entry points for the ```opentelemetry_resource_detector``` entry point. This is an experimental feature and the name of this variable and its behavior can change in a non-backwards compatible way. """ OTEL_EXPORTER_PROMETHEUS_HOST = "OTEL_EXPORTER_PROMETHEUS_HOST" """ .. envvar:: OTEL_EXPORTER_PROMETHEUS_HOST The :envvar:`OTEL_EXPORTER_PROMETHEUS_HOST` environment variable configures the host used by the Prometheus exporter. Default: "localhost" This is an experimental environment variable and the name of this variable and its behavior can change in a non-backwards compatible way. """ OTEL_EXPORTER_PROMETHEUS_PORT = "OTEL_EXPORTER_PROMETHEUS_PORT" """ .. envvar:: OTEL_EXPORTER_PROMETHEUS_PORT The :envvar:`OTEL_EXPORTER_PROMETHEUS_PORT` environment variable configures the port used by the Prometheus exporter. Default: 9464 This is an experimental environment variable and the name of this variable and its behavior can change in a non-backwards compatible way. """ python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/error_handler/000077500000000000000000000000001511654350100305665ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/error_handler/__init__.py000066400000000000000000000110061511654350100326750ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Global Error Handler This module provides a global error handler and an interface that allows error handlers to be registered with the global error handler via entry points. A default error handler is also provided. To use this feature, users can create an error handler that is registered using the ``opentelemetry_error_handler`` entry point. A class is to be registered in this entry point, this class must inherit from the ``opentelemetry.sdk.error_handler.ErrorHandler`` class and implement the corresponding ``handle`` method. This method will receive the exception object that is to be handled. The error handler class should also inherit from the exception classes it wants to handle. For example, this would be an error handler that handles ``ZeroDivisionError``: .. 
code:: python from opentelemetry.sdk.error_handler import ErrorHandler from logging import getLogger logger = getLogger(__name__) class ErrorHandler0(ErrorHandler, ZeroDivisionError): def _handle(self, error: Exception, *args, **kwargs): logger.exception("ErrorHandler0 handling a ZeroDivisionError") To use the global error handler, just instantiate it as a context manager where you want exceptions to be handled: .. code:: python from opentelemetry.sdk.error_handler import GlobalErrorHandler with GlobalErrorHandler(): 1 / 0 If the class of the exception raised in the scope of the ``GlobalErrorHandler`` object is not parent of any registered error handler, then the default error handler will handle the exception. This default error handler will only log the exception to standard logging, the exception won't be raised any further. """ from abc import ABC, abstractmethod from logging import getLogger from opentelemetry.util._importlib_metadata import entry_points logger = getLogger(__name__) class ErrorHandler(ABC): @abstractmethod def _handle(self, error: Exception, *args, **kwargs): """ Handle an exception """ class _DefaultErrorHandler(ErrorHandler): """ Default error handler This error handler just logs the exception using standard logging. """ # pylint: disable=useless-return def _handle(self, error: Exception, *args, **kwargs): logger.exception("Error handled by default error handler: ") return None class GlobalErrorHandler: """ Global error handler This is a singleton class that can be instantiated anywhere to get the global error handler. This object provides a handle method that receives an exception object that will be handled by the registered error handlers. """ _instance = None def __new__(cls) -> "GlobalErrorHandler": if cls._instance is None: cls._instance = super().__new__(cls) return cls._instance def __enter__(self): pass # pylint: disable=no-self-use def __exit__(self, exc_type, exc_value, traceback): if exc_value is None: return None plugin_handled = False error_handler_entry_points = entry_points( group="opentelemetry_error_handler" ) for error_handler_entry_point in error_handler_entry_points: error_handler_class = error_handler_entry_point.load() if issubclass(error_handler_class, exc_value.__class__): try: error_handler_class()._handle(exc_value) plugin_handled = True # pylint: disable=broad-exception-caught except Exception as error_handling_error: logger.exception( "%s error while handling error %s by error handler %s", error_handling_error.__class__.__name__, exc_value.__class__.__name__, error_handler_class.__name__, ) if not plugin_handled: _DefaultErrorHandler()._handle(exc_value) return True python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/000077500000000000000000000000001511654350100274065ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py000066400000000000000000000033211511654350100315160ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.sdk.metrics._internal import Meter, MeterProvider from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, Exemplar, ExemplarFilter, ExemplarReservoir, SimpleFixedSizeExemplarReservoir, TraceBasedExemplarFilter, ) from opentelemetry.sdk.metrics._internal.instrument import ( Counter, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, ) from opentelemetry.sdk.metrics._internal.instrument import Gauge as _Gauge __all__ = [ "AlignedHistogramBucketExemplarReservoir", "AlwaysOnExemplarFilter", "AlwaysOffExemplarFilter", "Exemplar", "ExemplarFilter", "ExemplarReservoir", "Meter", "MeterProvider", "MetricsTimeoutError", "Counter", "Histogram", "_Gauge", "ObservableCounter", "ObservableGauge", "ObservableUpDownCounter", "SimpleFixedSizeExemplarReservoir", "UpDownCounter", "TraceBasedExemplarFilter", ] python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/000077500000000000000000000000001511654350100313615ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/__init__.py000066400000000000000000000504651511654350100335040ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import weakref from atexit import register, unregister from logging import getLogger from os import environ from threading import Lock from time import time_ns from typing import Optional, Sequence # This kind of import is needed to avoid Sphinx errors. 
import opentelemetry.sdk.metrics from opentelemetry.metrics import Counter as APICounter from opentelemetry.metrics import Histogram as APIHistogram from opentelemetry.metrics import Meter as APIMeter from opentelemetry.metrics import MeterProvider as APIMeterProvider from opentelemetry.metrics import NoOpMeter from opentelemetry.metrics import ObservableCounter as APIObservableCounter from opentelemetry.metrics import ObservableGauge as APIObservableGauge from opentelemetry.metrics import ( ObservableUpDownCounter as APIObservableUpDownCounter, ) from opentelemetry.metrics import UpDownCounter as APIUpDownCounter from opentelemetry.metrics import _Gauge as APIGauge from opentelemetry.sdk.environment_variables import ( OTEL_METRICS_EXEMPLAR_FILTER, OTEL_SDK_DISABLED, ) from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError from opentelemetry.sdk.metrics._internal.exemplar import ( AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, ExemplarFilter, TraceBasedExemplarFilter, ) from opentelemetry.sdk.metrics._internal.instrument import ( _Counter, _Gauge, _Histogram, _ObservableCounter, _ObservableGauge, _ObservableUpDownCounter, _UpDownCounter, ) from opentelemetry.sdk.metrics._internal.measurement_consumer import ( MeasurementConsumer, SynchronousMeasurementConsumer, ) from opentelemetry.sdk.metrics._internal.sdk_configuration import ( SdkConfiguration, ) from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.util._once import Once from opentelemetry.util.types import ( Attributes, ) _logger = getLogger(__name__) class Meter(APIMeter): """See `opentelemetry.metrics.Meter`.""" def __init__( self, instrumentation_scope: InstrumentationScope, measurement_consumer: MeasurementConsumer, ): super().__init__( name=instrumentation_scope.name, version=instrumentation_scope.version, schema_url=instrumentation_scope.schema_url, ) self._instrumentation_scope = instrumentation_scope self._measurement_consumer = measurement_consumer self._instrument_id_instrument = {} self._instrument_id_instrument_lock = Lock() def create_counter(self, name, unit="", description="") -> APICounter: status = self._register_instrument(name, _Counter, unit, description) if status.conflict: # FIXME #2558 go through all views here and check if this # instrument registration conflict can be fixed. If it can be, do # not log the following warning. self._log_instrument_registration_conflict( name, APICounter.__name__, unit, description, status, ) if status.already_registered: with self._instrument_id_instrument_lock: return self._instrument_id_instrument[status.instrument_id] instrument = _Counter( name, self._instrumentation_scope, self._measurement_consumer, unit, description, ) with self._instrument_id_instrument_lock: self._instrument_id_instrument[status.instrument_id] = instrument return instrument def create_up_down_counter( self, name, unit="", description="" ) -> APIUpDownCounter: status = self._register_instrument( name, _UpDownCounter, unit, description ) if status.conflict: # FIXME #2558 go through all views here and check if this # instrument registration conflict can be fixed. If it can be, do # not log the following warning. 
self._log_instrument_registration_conflict( name, APIUpDownCounter.__name__, unit, description, status, ) if status.already_registered: with self._instrument_id_instrument_lock: return self._instrument_id_instrument[status.instrument_id] instrument = _UpDownCounter( name, self._instrumentation_scope, self._measurement_consumer, unit, description, ) with self._instrument_id_instrument_lock: self._instrument_id_instrument[status.instrument_id] = instrument return instrument def create_observable_counter( self, name, callbacks=None, unit="", description="", ) -> APIObservableCounter: status = self._register_instrument( name, _ObservableCounter, unit, description ) if status.conflict: # FIXME #2558 go through all views here and check if this # instrument registration conflict can be fixed. If it can be, do # not log the following warning. self._log_instrument_registration_conflict( name, APIObservableCounter.__name__, unit, description, status, ) if status.already_registered: with self._instrument_id_instrument_lock: return self._instrument_id_instrument[status.instrument_id] instrument = _ObservableCounter( name, self._instrumentation_scope, self._measurement_consumer, callbacks, unit, description, ) self._measurement_consumer.register_asynchronous_instrument(instrument) with self._instrument_id_instrument_lock: self._instrument_id_instrument[status.instrument_id] = instrument return instrument def create_histogram( self, name: str, unit: str = "", description: str = "", *, explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None, ) -> APIHistogram: if explicit_bucket_boundaries_advisory is not None: invalid_advisory = False if isinstance(explicit_bucket_boundaries_advisory, Sequence): try: invalid_advisory = not ( all( isinstance(e, (float, int)) for e in explicit_bucket_boundaries_advisory ) ) except (KeyError, TypeError): invalid_advisory = True else: invalid_advisory = True if invalid_advisory: explicit_bucket_boundaries_advisory = None _logger.warning( "explicit_bucket_boundaries_advisory must be a sequence of numbers" ) status = self._register_instrument( name, _Histogram, unit, description, explicit_bucket_boundaries_advisory, ) if status.conflict: # FIXME #2558 go through all views here and check if this # instrument registration conflict can be fixed. If it can be, do # not log the following warning. self._log_instrument_registration_conflict( name, APIHistogram.__name__, unit, description, status, ) if status.already_registered: with self._instrument_id_instrument_lock: return self._instrument_id_instrument[status.instrument_id] instrument = _Histogram( name, self._instrumentation_scope, self._measurement_consumer, unit, description, explicit_bucket_boundaries_advisory, ) with self._instrument_id_instrument_lock: self._instrument_id_instrument[status.instrument_id] = instrument return instrument def create_gauge(self, name, unit="", description="") -> APIGauge: status = self._register_instrument(name, _Gauge, unit, description) if status.conflict: # FIXME #2558 go through all views here and check if this # instrument registration conflict can be fixed. If it can be, do # not log the following warning. 
self._log_instrument_registration_conflict( name, APIGauge.__name__, unit, description, status, ) if status.already_registered: with self._instrument_id_instrument_lock: return self._instrument_id_instrument[status.instrument_id] instrument = _Gauge( name, self._instrumentation_scope, self._measurement_consumer, unit, description, ) with self._instrument_id_instrument_lock: self._instrument_id_instrument[status.instrument_id] = instrument return instrument def create_observable_gauge( self, name, callbacks=None, unit="", description="" ) -> APIObservableGauge: status = self._register_instrument( name, _ObservableGauge, unit, description ) if status.conflict: # FIXME #2558 go through all views here and check if this # instrument registration conflict can be fixed. If it can be, do # not log the following warning. self._log_instrument_registration_conflict( name, APIObservableGauge.__name__, unit, description, status, ) if status.already_registered: with self._instrument_id_instrument_lock: return self._instrument_id_instrument[status.instrument_id] instrument = _ObservableGauge( name, self._instrumentation_scope, self._measurement_consumer, callbacks, unit, description, ) self._measurement_consumer.register_asynchronous_instrument(instrument) with self._instrument_id_instrument_lock: self._instrument_id_instrument[status.instrument_id] = instrument return instrument def create_observable_up_down_counter( self, name, callbacks=None, unit="", description="" ) -> APIObservableUpDownCounter: status = self._register_instrument( name, _ObservableUpDownCounter, unit, description ) if status.conflict: # FIXME #2558 go through all views here and check if this # instrument registration conflict can be fixed. If it can be, do # not log the following warning. self._log_instrument_registration_conflict( name, APIObservableUpDownCounter.__name__, unit, description, status, ) if status.already_registered: with self._instrument_id_instrument_lock: return self._instrument_id_instrument[status.instrument_id] instrument = _ObservableUpDownCounter( name, self._instrumentation_scope, self._measurement_consumer, callbacks, unit, description, ) self._measurement_consumer.register_asynchronous_instrument(instrument) with self._instrument_id_instrument_lock: self._instrument_id_instrument[status.instrument_id] = instrument return instrument def _get_exemplar_filter(exemplar_filter: str) -> ExemplarFilter: if exemplar_filter == "trace_based": return TraceBasedExemplarFilter() if exemplar_filter == "always_on": return AlwaysOnExemplarFilter() if exemplar_filter == "always_off": return AlwaysOffExemplarFilter() msg = f"Unknown exemplar filter '{exemplar_filter}'." raise ValueError(msg) class MeterProvider(APIMeterProvider): r"""See `opentelemetry.metrics.MeterProvider`. Args: metric_readers: Register metric readers to collect metrics from the SDK on demand. Each :class:`opentelemetry.sdk.metrics.export.MetricReader` is completely independent and will collect separate streams of metrics. TODO: reference ``PeriodicExportingMetricReader`` usage with push exporters here. resource: The resource representing what the metrics emitted from the SDK pertain to. 
shutdown_on_exit: If true, registers an `atexit` handler to call `MeterProvider.shutdown` views: The views to configure the metric output the SDK By default, instruments which do not match any :class:`opentelemetry.sdk.metrics.view.View` (or if no :class:`opentelemetry.sdk.metrics.view.View`\ s are provided) will report metrics with the default aggregation for the instrument's kind. To disable instruments by default, configure a match-all :class:`opentelemetry.sdk.metrics.view.View` with `DropAggregation` and then create :class:`opentelemetry.sdk.metrics.view.View`\ s to re-enable individual instruments: .. code-block:: python :caption: Disable default views MeterProvider( views=[ View(instrument_name="*", aggregation=DropAggregation()), View(instrument_name="mycounter"), ], # ... ) """ _all_metric_readers_lock = Lock() _all_metric_readers = weakref.WeakSet() def __init__( self, metric_readers: Sequence[ "opentelemetry.sdk.metrics.export.MetricReader" ] = (), resource: Optional[Resource] = None, exemplar_filter: Optional[ExemplarFilter] = None, shutdown_on_exit: bool = True, views: Sequence["opentelemetry.sdk.metrics.view.View"] = (), ): self._lock = Lock() self._meter_lock = Lock() self._atexit_handler = None if resource is None: resource = Resource.create({}) self._sdk_config = SdkConfiguration( exemplar_filter=( exemplar_filter or _get_exemplar_filter( environ.get(OTEL_METRICS_EXEMPLAR_FILTER, "trace_based") ) ), resource=resource, metric_readers=metric_readers, views=views, ) self._measurement_consumer = SynchronousMeasurementConsumer( sdk_config=self._sdk_config ) disabled = environ.get(OTEL_SDK_DISABLED, "") self._disabled = disabled.lower().strip() == "true" if shutdown_on_exit: self._atexit_handler = register(self.shutdown) self._meters = {} self._shutdown_once = Once() self._shutdown = False for metric_reader in self._sdk_config.metric_readers: with self._all_metric_readers_lock: if metric_reader in self._all_metric_readers: # pylint: disable=broad-exception-raised raise Exception( f"MetricReader {metric_reader} has been registered " "already in other MeterProvider instance" ) self._all_metric_readers.add(metric_reader) metric_reader._set_collect_callback( self._measurement_consumer.collect ) def force_flush(self, timeout_millis: float = 10_000) -> bool: deadline_ns = time_ns() + timeout_millis * 10**6 metric_reader_error = {} for metric_reader in self._sdk_config.metric_readers: current_ts = time_ns() try: if current_ts >= deadline_ns: raise MetricsTimeoutError( "Timed out while flushing metric readers" ) metric_reader.force_flush( timeout_millis=(deadline_ns - current_ts) / 10**6 ) # pylint: disable=broad-exception-caught except Exception as error: metric_reader_error[metric_reader] = error if metric_reader_error: metric_reader_error_string = "\n".join( [ f"{metric_reader.__class__.__name__}: {repr(error)}" for metric_reader, error in metric_reader_error.items() ] ) # pylint: disable=broad-exception-raised raise Exception( "MeterProvider.force_flush failed because the following " "metric readers failed during collect:\n" f"{metric_reader_error_string}" ) return True def shutdown(self, timeout_millis: float = 30_000): deadline_ns = time_ns() + timeout_millis * 10**6 def _shutdown(): self._shutdown = True did_shutdown = self._shutdown_once.do_once(_shutdown) if not did_shutdown: _logger.warning("shutdown can only be called once") return metric_reader_error = {} for metric_reader in self._sdk_config.metric_readers: current_ts = time_ns() try: if current_ts >= deadline_ns: # pylint: 
disable=broad-exception-raised raise Exception( "Didn't get to execute, deadline already exceeded" ) metric_reader.shutdown( timeout_millis=(deadline_ns - current_ts) / 10**6 ) # pylint: disable=broad-exception-caught except Exception as error: metric_reader_error[metric_reader] = error if self._atexit_handler is not None: unregister(self._atexit_handler) self._atexit_handler = None if metric_reader_error: metric_reader_error_string = "\n".join( [ f"{metric_reader.__class__.__name__}: {repr(error)}" for metric_reader, error in metric_reader_error.items() ] ) # pylint: disable=broad-exception-raised raise Exception( ( "MeterProvider.shutdown failed because the following " "metric readers failed during shutdown:\n" f"{metric_reader_error_string}" ) ) def get_meter( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[Attributes] = None, ) -> Meter: if self._disabled: return NoOpMeter(name, version=version, schema_url=schema_url) if self._shutdown: _logger.warning( "A shutdown `MeterProvider` can not provide a `Meter`" ) return NoOpMeter(name, version=version, schema_url=schema_url) if not name: _logger.warning("Meter name cannot be None or empty.") return NoOpMeter(name, version=version, schema_url=schema_url) info = InstrumentationScope(name, version, schema_url, attributes) with self._meter_lock: if not self._meters.get(info): # FIXME #2558 pass SDKConfig object to meter so that the meter # has access to views. self._meters[info] = Meter( info, self._measurement_consumer, ) return self._meters[info] _view_instrument_match.py000066400000000000000000000134551511654350100364410ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
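# Illustrative sketch (comments only, not upstream code) of how the SDK drives
# this module: one _ViewInstrumentMatch pairs a View with an Instrument and
# keeps a separate aggregation per attribute set --
#
#     match = _ViewInstrumentMatch(view, instrument, instrument_class_aggregation)
#     match.consume_measurement(measurement)  # routed by frozenset(attributes)
#     points = match.collect(AggregationTemporality.CUMULATIVE, time_ns())
#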
from logging import getLogger from threading import Lock from time import time_ns from typing import Dict, List, Optional, Sequence from opentelemetry.metrics import Instrument from opentelemetry.sdk.metrics._internal.aggregation import ( Aggregation, DefaultAggregation, _Aggregation, _SumAggregation, ) from opentelemetry.sdk.metrics._internal.export import AggregationTemporality from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.point import DataPointT from opentelemetry.sdk.metrics._internal.view import View _logger = getLogger(__name__) class _ViewInstrumentMatch: def __init__( self, view: View, instrument: Instrument, instrument_class_aggregation: Dict[type, Aggregation], ): self._view = view self._instrument = instrument self._attributes_aggregation: Dict[frozenset, _Aggregation] = {} self._lock = Lock() self._instrument_class_aggregation = instrument_class_aggregation self._name = self._view._name or self._instrument.name self._description = ( self._view._description or self._instrument.description ) if not isinstance(self._view._aggregation, DefaultAggregation): self._aggregation = self._view._aggregation._create_aggregation( self._instrument, None, self._view._exemplar_reservoir_factory, 0, ) else: self._aggregation = self._instrument_class_aggregation[ self._instrument.__class__ ]._create_aggregation( self._instrument, None, self._view._exemplar_reservoir_factory, 0, ) def conflicts(self, other: "_ViewInstrumentMatch") -> bool: # pylint: disable=protected-access result = ( self._name == other._name and self._instrument.unit == other._instrument.unit # The aggregation class is being used here instead of data point # type since they are functionally equivalent. and self._aggregation.__class__ == other._aggregation.__class__ ) if isinstance(self._aggregation, _SumAggregation): result = ( result and self._aggregation._instrument_is_monotonic == other._aggregation._instrument_is_monotonic and self._aggregation._instrument_aggregation_temporality == other._aggregation._instrument_aggregation_temporality ) return result # pylint: disable=protected-access def consume_measurement( self, measurement: Measurement, should_sample_exemplar: bool = True ) -> None: if self._view._attribute_keys is not None: attributes = {} for key, value in (measurement.attributes or {}).items(): if key in self._view._attribute_keys: attributes[key] = value elif measurement.attributes is not None: attributes = measurement.attributes else: attributes = {} aggr_key = frozenset(attributes.items()) if aggr_key not in self._attributes_aggregation: with self._lock: if aggr_key not in self._attributes_aggregation: if not isinstance( self._view._aggregation, DefaultAggregation ): aggregation = ( self._view._aggregation._create_aggregation( self._instrument, attributes, self._view._exemplar_reservoir_factory, time_ns(), ) ) else: aggregation = self._instrument_class_aggregation[ self._instrument.__class__ ]._create_aggregation( self._instrument, attributes, self._view._exemplar_reservoir_factory, time_ns(), ) self._attributes_aggregation[aggr_key] = aggregation self._attributes_aggregation[aggr_key].aggregate( measurement, should_sample_exemplar ) def collect( self, collection_aggregation_temporality: AggregationTemporality, collection_start_nanos: int, ) -> Optional[Sequence[DataPointT]]: data_points: List[DataPointT] = [] with self._lock: for aggregation in self._attributes_aggregation.values(): data_point = aggregation.collect( 
collection_aggregation_temporality, collection_start_nanos ) if data_point is not None: data_points.append(data_point) # Returning here None instead of an empty list because the caller # does not consume a sequence and to be consistent with the rest of # collect methods that also return None. return data_points or None python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py000066400000000000000000001444311511654350100342310ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=too-many-lines from abc import ABC, abstractmethod from bisect import bisect_left from enum import IntEnum from functools import partial from logging import getLogger from math import inf from threading import Lock from typing import ( Callable, Generic, List, Optional, Sequence, Type, TypeVar, ) from opentelemetry.metrics import ( Asynchronous, Counter, Histogram, Instrument, ObservableCounter, ObservableGauge, ObservableUpDownCounter, Synchronous, UpDownCounter, _Gauge, ) from opentelemetry.sdk.metrics._internal.exemplar import ( Exemplar, ExemplarReservoirBuilder, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.buckets import ( Buckets, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import ( Mapping, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.exponent_mapping import ( ExponentMapping, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.logarithm_mapping import ( LogarithmMapping, ) from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.point import Buckets as BucketsPoint from opentelemetry.sdk.metrics._internal.point import ( ExponentialHistogramDataPoint, HistogramDataPoint, NumberDataPoint, Sum, ) from opentelemetry.sdk.metrics._internal.point import Gauge as GaugePoint from opentelemetry.sdk.metrics._internal.point import ( Histogram as HistogramPoint, ) from opentelemetry.util.types import Attributes _DataPointVarT = TypeVar("_DataPointVarT", NumberDataPoint, HistogramDataPoint) _logger = getLogger(__name__) class AggregationTemporality(IntEnum): """ The temporality to use when aggregating data. Can be one of the following values: """ UNSPECIFIED = 0 DELTA = 1 CUMULATIVE = 2 class _Aggregation(ABC, Generic[_DataPointVarT]): def __init__( self, attributes: Attributes, reservoir_builder: ExemplarReservoirBuilder, ): self._lock = Lock() self._attributes = attributes self._reservoir = reservoir_builder() self._previous_point = None @abstractmethod def aggregate( self, measurement: Measurement, should_sample_exemplar: bool = True ) -> None: """Aggregate a measurement. Args: measurement: Measurement to aggregate should_sample_exemplar: Whether the measurement should be sampled by the exemplars reservoir or not. 
""" @abstractmethod def collect( self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, ) -> Optional[_DataPointVarT]: pass def _collect_exemplars(self) -> Sequence[Exemplar]: """Returns the collected exemplars. Returns: The exemplars collected by the reservoir """ return self._reservoir.collect(self._attributes) def _sample_exemplar( self, measurement: Measurement, should_sample_exemplar: bool ) -> None: """Offer the measurement to the exemplar reservoir for sampling. It should be called within the each :ref:`aggregate` call. Args: measurement: The new measurement should_sample_exemplar: Whether the measurement should be sampled by the exemplars reservoir or not. """ if should_sample_exemplar: self._reservoir.offer( measurement.value, measurement.time_unix_nano, measurement.attributes, measurement.context, ) class _DropAggregation(_Aggregation): def aggregate( self, measurement: Measurement, should_sample_exemplar: bool = True ) -> None: pass def collect( self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, ) -> Optional[_DataPointVarT]: pass class _SumAggregation(_Aggregation[Sum]): def __init__( self, attributes: Attributes, instrument_is_monotonic: bool, instrument_aggregation_temporality: AggregationTemporality, start_time_unix_nano: int, reservoir_builder: ExemplarReservoirBuilder, ): super().__init__(attributes, reservoir_builder) self._start_time_unix_nano = start_time_unix_nano self._instrument_aggregation_temporality = ( instrument_aggregation_temporality ) self._instrument_is_monotonic = instrument_is_monotonic self._value = None self._previous_collection_start_nano = self._start_time_unix_nano self._previous_value = 0 def aggregate( self, measurement: Measurement, should_sample_exemplar: bool = True ) -> None: with self._lock: if self._value is None: self._value = 0 self._value = self._value + measurement.value self._sample_exemplar(measurement, should_sample_exemplar) def collect( self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, ) -> Optional[NumberDataPoint]: """ Atomically return a point for the current value of the metric and reset the aggregation value. Synchronous instruments have a method which is called directly with increments for a given quantity: For example, an instrument that counts the amount of passengers in every vehicle that crosses a certain point in a highway: synchronous_instrument.add(2) collect(...) # 2 passengers are counted synchronous_instrument.add(3) collect(...) # 3 passengers are counted synchronous_instrument.add(1) collect(...) # 1 passenger is counted In this case the instrument aggregation temporality is DELTA because every value represents an increment to the count, Asynchronous instruments have a callback which returns the total value of a given quantity: For example, an instrument that measures the amount of bytes written to a certain hard drive: callback() -> 1352 collect(...) # 1352 bytes have been written so far callback() -> 2324 collect(...) # 2324 bytes have been written so far callback() -> 4542 collect(...) # 4542 bytes have been written so far In this case the instrument aggregation temporality is CUMULATIVE because every value represents the total of the measurement. There is also the collection aggregation temporality, which is passed to this method. The collection aggregation temporality defines the nature of the returned value by this aggregation. 
When the collection aggregation temporality matches the instrument aggregation temporality, then this method returns the current value directly: synchronous_instrument.add(2) collect(DELTA) -> 2 synchronous_instrument.add(3) collect(DELTA) -> 3 synchronous_instrument.add(1) collect(DELTA) -> 1 callback() -> 1352 collect(CUMULATIVE) -> 1352 callback() -> 2324 collect(CUMULATIVE) -> 2324 callback() -> 4542 collect(CUMULATIVE) -> 4542 When the collection aggregation temporality does not match the instrument aggregation temporality, then a conversion is made. For this purpose, this aggregation keeps a private attribute, self._previous_value. When the instrument is synchronous: self._previous_value is the sum of every previously collected (delta) value. In this case, the returned (cumulative) value will be: self._previous_value + value synchronous_instrument.add(2) collect(CUMULATIVE) -> 2 synchronous_instrument.add(3) collect(CUMULATIVE) -> 5 synchronous_instrument.add(1) collect(CUMULATIVE) -> 6 Also, as a diagram: time -> self._previous_value |-------------| value (delta) |----| returned value (cumulative) |------------------| When the instrument is asynchronous: self._previous_value is the value of the previously collected (cumulative) value. In this case, the returned (delta) value will be: value - self._previous_value callback() -> 1352 collect(DELTA) -> 1352 callback() -> 2324 collect(DELTA) -> 972 callback() -> 4542 collect(DELTA) -> 2218 Also, as a diagram: time -> self._previous_value |-------------| value (cumulative) |------------------| returned value (delta) |----| """ with self._lock: value = self._value self._value = None if ( self._instrument_aggregation_temporality is AggregationTemporality.DELTA ): # This happens when the corresponding instrument for this # aggregation is synchronous. if ( collection_aggregation_temporality is AggregationTemporality.DELTA ): previous_collection_start_nano = ( self._previous_collection_start_nano ) self._previous_collection_start_nano = ( collection_start_nano ) if value is None: return None return NumberDataPoint( attributes=self._attributes, exemplars=self._collect_exemplars(), start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, value=value, ) if value is None: value = 0 self._previous_value = value + self._previous_value return NumberDataPoint( attributes=self._attributes, exemplars=self._collect_exemplars(), start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, value=self._previous_value, ) # This happens when the corresponding instrument for this # aggregation is asynchronous. if value is None: # This happens when the corresponding instrument callback # does not produce measurements. 
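            # A minimal sketch (illustrative only; the measurement objects and
            # the reservoir choice are assumptions, not prescribed usage) of
            # the cumulative-to-delta conversion described above, driving this
            # aggregation directly the way a reader might:
            #
            #   from time import time_ns
            #   from opentelemetry.sdk.metrics._internal.exemplar import (
            #       SimpleFixedSizeExemplarReservoir,
            #   )
            #   agg = _SumAggregation(
            #       attributes={},
            #       instrument_is_monotonic=True,
            #       instrument_aggregation_temporality=AggregationTemporality.CUMULATIVE,
            #       start_time_unix_nano=time_ns(),
            #       reservoir_builder=SimpleFixedSizeExemplarReservoir,
            #   )
            #   agg.aggregate(measurement_1352)  # a Measurement with value 1352
            #   agg.collect(AggregationTemporality.DELTA, time_ns()).value  # 1352
            #   agg.aggregate(measurement_2324)  # a Measurement with value 2324
            #   agg.collect(AggregationTemporality.DELTA, time_ns()).value  # 972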
return None if ( collection_aggregation_temporality is AggregationTemporality.DELTA ): result_value = value - self._previous_value self._previous_value = value previous_collection_start_nano = ( self._previous_collection_start_nano ) self._previous_collection_start_nano = collection_start_nano return NumberDataPoint( attributes=self._attributes, exemplars=self._collect_exemplars(), start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, value=result_value, ) return NumberDataPoint( attributes=self._attributes, exemplars=self._collect_exemplars(), start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, value=value, ) class _LastValueAggregation(_Aggregation[GaugePoint]): def __init__( self, attributes: Attributes, reservoir_builder: ExemplarReservoirBuilder, ): super().__init__(attributes, reservoir_builder) self._value = None def aggregate( self, measurement: Measurement, should_sample_exemplar: bool = True ): with self._lock: self._value = measurement.value self._sample_exemplar(measurement, should_sample_exemplar) def collect( self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, ) -> Optional[_DataPointVarT]: """ Atomically return a point for the current value of the metric. """ with self._lock: if self._value is None: return None value = self._value self._value = None exemplars = self._collect_exemplars() return NumberDataPoint( attributes=self._attributes, exemplars=exemplars, start_time_unix_nano=None, time_unix_nano=collection_start_nano, value=value, ) _DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES: Sequence[float] = ( 0.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 2500.0, 5000.0, 7500.0, 10000.0, ) class _ExplicitBucketHistogramAggregation(_Aggregation[HistogramPoint]): def __init__( self, attributes: Attributes, instrument_aggregation_temporality: AggregationTemporality, start_time_unix_nano: int, reservoir_builder: ExemplarReservoirBuilder, boundaries: Optional[Sequence[float]] = None, record_min_max: bool = True, ): if boundaries is None: boundaries = ( _DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES ) super().__init__( attributes, reservoir_builder=partial( reservoir_builder, boundaries=boundaries ), ) self._instrument_aggregation_temporality = ( instrument_aggregation_temporality ) self._start_time_unix_nano = start_time_unix_nano self._boundaries = tuple(boundaries) self._record_min_max = record_min_max self._value = None self._min = inf self._max = -inf self._sum = 0 self._previous_value = None self._previous_min = inf self._previous_max = -inf self._previous_sum = 0 self._previous_collection_start_nano = self._start_time_unix_nano def _get_empty_bucket_counts(self) -> List[int]: return [0] * (len(self._boundaries) + 1) def aggregate( self, measurement: Measurement, should_sample_exemplar: bool = True ) -> None: with self._lock: if self._value is None: self._value = self._get_empty_bucket_counts() measurement_value = measurement.value self._sum += measurement_value if self._record_min_max: self._min = min(self._min, measurement_value) self._max = max(self._max, measurement_value) self._value[bisect_left(self._boundaries, measurement_value)] += 1 self._sample_exemplar(measurement, should_sample_exemplar) def collect( self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, ) -> Optional[_DataPointVarT]: """ Atomically return a point for the current value of the metric. 
""" with self._lock: value = self._value sum_ = self._sum min_ = self._min max_ = self._max self._value = None self._sum = 0 self._min = inf self._max = -inf if ( self._instrument_aggregation_temporality is AggregationTemporality.DELTA ): # This happens when the corresponding instrument for this # aggregation is synchronous. if ( collection_aggregation_temporality is AggregationTemporality.DELTA ): previous_collection_start_nano = ( self._previous_collection_start_nano ) self._previous_collection_start_nano = ( collection_start_nano ) if value is None: return None return HistogramDataPoint( attributes=self._attributes, exemplars=self._collect_exemplars(), start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, count=sum(value), sum=sum_, bucket_counts=tuple(value), explicit_bounds=self._boundaries, min=min_, max=max_, ) if value is None: value = self._get_empty_bucket_counts() if self._previous_value is None: self._previous_value = self._get_empty_bucket_counts() self._previous_value = [ value_element + previous_value_element for ( value_element, previous_value_element, ) in zip(value, self._previous_value) ] self._previous_min = min(min_, self._previous_min) self._previous_max = max(max_, self._previous_max) self._previous_sum = sum_ + self._previous_sum return HistogramDataPoint( attributes=self._attributes, exemplars=self._collect_exemplars(), start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, count=sum(self._previous_value), sum=self._previous_sum, bucket_counts=tuple(self._previous_value), explicit_bounds=self._boundaries, min=self._previous_min, max=self._previous_max, ) return None # pylint: disable=protected-access class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]): # _min_max_size and _max_max_size are the smallest and largest values # the max_size parameter may have, respectively. # _min_max_size is is the smallest reasonable value which is small enough # to contain the entire normal floating point range at the minimum scale. _min_max_size = 2 # _max_max_size is an arbitrary limit meant to limit accidental creation of # giant exponential bucket histograms. _max_max_size = 16384 def __init__( self, attributes: Attributes, reservoir_builder: ExemplarReservoirBuilder, instrument_aggregation_temporality: AggregationTemporality, start_time_unix_nano: int, # This is the default maximum number of buckets per positive or # negative number range. The value 160 is specified by OpenTelemetry. # See the derivation here: # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exponential-bucket-histogram-aggregation) max_size: int = 160, max_scale: int = 20, ): # max_size is the maximum capacity of the positive and negative # buckets. # _sum is the sum of all the values aggregated by this aggregator. # _count is the count of all calls to aggregate. # _zero_count is the count of all the calls to aggregate when the value # to be aggregated is exactly 0. # _min is the smallest value aggregated by this aggregator. # _max is the smallest value aggregated by this aggregator. # _positive holds the positive values. # _negative holds the negative values by their absolute value. 
        if max_size < self._min_max_size:
            raise ValueError(
                f"Buckets max size {max_size} is smaller than "
                f"minimum max size {self._min_max_size}"
            )

        if max_size > self._max_max_size:
            raise ValueError(
                f"Buckets max size {max_size} is larger than "
                f"maximum max size {self._max_max_size}"
            )

        if max_scale > 20:
            _logger.warning(
                "max_scale is set to %s which is "
                "larger than the recommended value of 20",
                max_scale,
            )

        # This aggregation is analogous to _ExplicitBucketHistogramAggregation;
        # the only difference is that with every call to aggregate, the size
        # and amount of buckets can change (in
        # _ExplicitBucketHistogramAggregation both size and amount of buckets
        # remain constant once it is instantiated).

        super().__init__(
            attributes,
            reservoir_builder=partial(
                reservoir_builder, size=min(20, max_size)
            ),
        )

        self._instrument_aggregation_temporality = (
            instrument_aggregation_temporality
        )
        self._start_time_unix_nano = start_time_unix_nano
        self._max_size = max_size
        self._max_scale = max_scale

        self._value_positive = None
        self._value_negative = None
        self._min = inf
        self._max = -inf
        self._sum = 0
        self._count = 0
        self._zero_count = 0
        self._scale = None

        self._previous_value_positive = None
        self._previous_value_negative = None
        self._previous_min = inf
        self._previous_max = -inf
        self._previous_sum = 0
        self._previous_count = 0
        self._previous_zero_count = 0
        self._previous_scale = None
        self._previous_collection_start_nano = self._start_time_unix_nano

        self._mapping = self._new_mapping(self._max_scale)

    def aggregate(
        self, measurement: Measurement, should_sample_exemplar: bool = True
    ) -> None:
        # pylint: disable=too-many-branches,too-many-statements,too-many-locals

        with self._lock:
            if self._value_positive is None:
                self._value_positive = Buckets()
            if self._value_negative is None:
                self._value_negative = Buckets()

            measurement_value = measurement.value

            self._sum += measurement_value

            self._min = min(self._min, measurement_value)
            self._max = max(self._max, measurement_value)

            self._count += 1

            if measurement_value == 0:
                self._zero_count += 1

                if self._count == self._zero_count:
                    self._scale = 0

                return

            if measurement_value > 0:
                value = self._value_positive
            else:
                measurement_value = -measurement_value
                value = self._value_negative

            # The following code finds out if it is necessary to change the
            # buckets to hold the incoming measurement_value, and changes
            # them if necessary. This process does not exist in
            # _ExplicitBucketHistogramAggregation because the buckets there
            # are constant in size and amount.
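            # Worked example (illustrative) of the index computation below,
            # supposing the mapping were at scale 0 (base == 2), with buckets
            # spanning (base ** index, base ** (index + 1)]:
            #   value 7.5 -> index 2, since 2 ** 2 < 7.5 <= 2 ** 3
            #   value 8.0 -> index 2, since upper boundaries are inclusive
            #   value 9.0 -> index 3, since 2 ** 3 < 9.0 <= 2 ** 4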
index = self._mapping.map_to_index(measurement_value) is_rescaling_needed = False low, high = 0, 0 if len(value) == 0: value.index_start = index value.index_end = index value.index_base = index elif ( index < value.index_start and (value.index_end - index) >= self._max_size ): is_rescaling_needed = True low = index high = value.index_end elif ( index > value.index_end and (index - value.index_start) >= self._max_size ): is_rescaling_needed = True low = value.index_start high = index if is_rescaling_needed: scale_change = self._get_scale_change(low, high) self._downscale( scale_change, self._value_positive, self._value_negative, ) self._mapping = self._new_mapping( self._mapping.scale - scale_change ) index = self._mapping.map_to_index(measurement_value) self._scale = self._mapping.scale if index < value.index_start: span = value.index_end - index if span >= len(value.counts): value.grow(span + 1, self._max_size) value.index_start = index elif index > value.index_end: span = index - value.index_start if span >= len(value.counts): value.grow(span + 1, self._max_size) value.index_end = index bucket_index = index - value.index_base if bucket_index < 0: bucket_index += len(value.counts) # Now the buckets have been changed if needed and bucket_index will # be used to increment the counter of the bucket that needs to be # incremented. # This is analogous to # self._value[bisect_left(self._boundaries, measurement_value)] += 1 # in _ExplicitBucketHistogramAggregation.aggregate value.increment_bucket(bucket_index) self._sample_exemplar(measurement, should_sample_exemplar) def collect( self, collection_aggregation_temporality: AggregationTemporality, collection_start_nano: int, ) -> Optional[_DataPointVarT]: """ Atomically return a point for the current value of the metric. """ # pylint: disable=too-many-statements, too-many-locals with self._lock: value_positive = self._value_positive value_negative = self._value_negative sum_ = self._sum min_ = self._min max_ = self._max count = self._count zero_count = self._zero_count scale = self._scale self._value_positive = None self._value_negative = None self._sum = 0 self._min = inf self._max = -inf self._count = 0 self._zero_count = 0 self._scale = None if ( self._instrument_aggregation_temporality is AggregationTemporality.DELTA ): # This happens when the corresponding instrument for this # aggregation is synchronous. if ( collection_aggregation_temporality is AggregationTemporality.DELTA ): previous_collection_start_nano = ( self._previous_collection_start_nano ) self._previous_collection_start_nano = ( collection_start_nano ) if value_positive is None and value_negative is None: return None return ExponentialHistogramDataPoint( attributes=self._attributes, exemplars=self._collect_exemplars(), start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, count=count, sum=sum_, scale=scale, zero_count=zero_count, positive=BucketsPoint( offset=value_positive.offset, bucket_counts=(value_positive.get_offset_counts()), ), negative=BucketsPoint( offset=value_negative.offset, bucket_counts=(value_negative.get_offset_counts()), ), # FIXME: Find the right value for flags flags=0, min=min_, max=max_, ) # Here collection_temporality is CUMULATIVE. # instrument_temporality is always DELTA for the time being. # Here we need to handle the case where: # collect is called after at least one other call to collect # (there is data in previous buckets, a call to merge is needed # to handle possible differences in bucket sizes). 
                # collect is called without any previous call to collect
                # having been made (there are no previous buckets; previous,
                # empty buckets that have the same scale as the current
                # buckets need to be created so that the current buckets can
                # be cumulatively aggregated onto them).

                if (
                    value_positive is None
                    and self._previous_value_positive is None
                ):
                    # This happens if collect is called for the first time
                    # and aggregate has not yet been called.
                    value_positive = Buckets()
                    self._previous_value_positive = value_positive.copy_empty()
                if (
                    value_negative is None
                    and self._previous_value_negative is None
                ):
                    value_negative = Buckets()
                    self._previous_value_negative = value_negative.copy_empty()
                if scale is None and self._previous_scale is None:
                    scale = self._mapping.scale
                    self._previous_scale = scale

                if (
                    value_positive is not None
                    and self._previous_value_positive is None
                ):
                    # This happens when collect is called the very first time
                    # and aggregate has been called before.

                    # We need previous buckets to add them to the current
                    # ones. When collect is called for the first time, there
                    # are no previous buckets, so we need to create empty
                    # buckets to add them to the current ones. The addition of
                    # empty buckets to the current ones will result in the
                    # current ones unchanged.

                    # The way the previous buckets are generated here is
                    # different from the explicit bucket histogram where
                    # the size and amount of the buckets does not change once
                    # they are instantiated. Here, the size and amount of the
                    # buckets can change with every call to aggregate. In
                    # order to get empty buckets that can be added to the
                    # current ones resulting in the current ones unchanged we
                    # need to generate empty buckets that have the same size
                    # and amount as the current ones; this is what copy_empty
                    # does.
                    self._previous_value_positive = (
                        value_positive.copy_empty()
                    )
                if (
                    value_negative is not None
                    and self._previous_value_negative is None
                ):
                    self._previous_value_negative = (
                        value_negative.copy_empty()
                    )
                if scale is not None and self._previous_scale is None:
                    self._previous_scale = scale

                if (
                    value_positive is None
                    and self._previous_value_positive is not None
                ):
                    value_positive = self._previous_value_positive.copy_empty()
                if (
                    value_negative is None
                    and self._previous_value_negative is not None
                ):
                    value_negative = self._previous_value_negative.copy_empty()
                if scale is None and self._previous_scale is not None:
                    scale = self._previous_scale

                min_scale = min(self._previous_scale, scale)

                low_positive, high_positive = (
                    self._get_low_high_previous_current(
                        self._previous_value_positive,
                        value_positive,
                        scale,
                        min_scale,
                    )
                )
                low_negative, high_negative = (
                    self._get_low_high_previous_current(
                        self._previous_value_negative,
                        value_negative,
                        scale,
                        min_scale,
                    )
                )

                min_scale = min(
                    min_scale
                    - self._get_scale_change(low_positive, high_positive),
                    min_scale
                    - self._get_scale_change(low_negative, high_negative),
                )

                self._downscale(
                    self._previous_scale - min_scale,
                    self._previous_value_positive,
                    self._previous_value_negative,
                )

                # self._merge adds the values from value to
                # self._previous_value, this is analogous to
                # self._previous_value = [
                #     value_element + previous_value_element
                #     for (
                #         value_element,
                #         previous_value_element,
                #     ) in zip(value, self._previous_value)
                # ]
                # in _ExplicitBucketHistogramAggregation.collect.
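                # For example (illustrative): if the previous buckets were
                # built at scale 4 and the current ones at scale 3, the
                # downscale and merge below bring both to the common
                # min_scale; each decrement of the scale collapses
                # neighboring buckets pairwise, so an index i at the finer
                # scale contributes to index i >> 1 at the next coarser one
                # (index 5 at scale 4 lands in index 2 at scale 3).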
self._merge( self._previous_value_positive, value_positive, scale, min_scale, collection_aggregation_temporality, ) self._merge( self._previous_value_negative, value_negative, scale, min_scale, collection_aggregation_temporality, ) self._previous_min = min(min_, self._previous_min) self._previous_max = max(max_, self._previous_max) self._previous_sum = sum_ + self._previous_sum self._previous_count = count + self._previous_count self._previous_zero_count = ( zero_count + self._previous_zero_count ) self._previous_scale = min_scale return ExponentialHistogramDataPoint( attributes=self._attributes, exemplars=self._collect_exemplars(), start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, count=self._previous_count, sum=self._previous_sum, scale=self._previous_scale, zero_count=self._previous_zero_count, positive=BucketsPoint( offset=self._previous_value_positive.offset, bucket_counts=( self._previous_value_positive.get_offset_counts() ), ), negative=BucketsPoint( offset=self._previous_value_negative.offset, bucket_counts=( self._previous_value_negative.get_offset_counts() ), ), # FIXME: Find the right value for flags flags=0, min=self._previous_min, max=self._previous_max, ) return None def _get_low_high_previous_current( self, previous_point_buckets, current_point_buckets, current_scale, min_scale, ): (previous_point_low, previous_point_high) = self._get_low_high( previous_point_buckets, self._previous_scale, min_scale ) (current_point_low, current_point_high) = self._get_low_high( current_point_buckets, current_scale, min_scale ) if current_point_low > current_point_high: low = previous_point_low high = previous_point_high elif previous_point_low > previous_point_high: low = current_point_low high = current_point_high else: low = min(previous_point_low, current_point_low) high = max(previous_point_high, current_point_high) return low, high @staticmethod def _get_low_high(buckets, scale, min_scale): if buckets.counts == [0]: return 0, -1 shift = scale - min_scale return buckets.index_start >> shift, buckets.index_end >> shift @staticmethod def _new_mapping(scale: int) -> Mapping: if scale <= 0: return ExponentMapping(scale) return LogarithmMapping(scale) def _get_scale_change(self, low, high): change = 0 while high - low >= self._max_size: high = high >> 1 low = low >> 1 change += 1 return change @staticmethod def _downscale(change: int, positive, negative): if change == 0: return if change < 0: # pylint: disable=broad-exception-raised raise Exception("Invalid change of scale") positive.downscale(change) negative.downscale(change) def _merge( self, previous_buckets: Buckets, current_buckets: Buckets, current_scale, min_scale, aggregation_temporality, ): current_change = current_scale - min_scale for current_bucket_index, current_bucket in enumerate( current_buckets.counts ): if current_bucket == 0: continue # Not considering the case where len(previous_buckets) == 0. This # would not happen because self._previous_point is only assigned to # an ExponentialHistogramDataPoint object if self._count != 0. 
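            # Illustrative example of the wraparound handled below: with
            # len(counts) == 4, index_base == 4, index_start == 2 and
            # index_end == 5, positions 0..3 of counts hold the counts for
            # logical indexes 4, 5, 2 and 3 respectively; positions whose
            # computed index exceeds index_end wrap back to the low end of
            # the index range.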
current_index = current_buckets.index_base + current_bucket_index if current_index > current_buckets.index_end: current_index -= len(current_buckets.counts) index = current_index >> current_change if index < previous_buckets.index_start: span = previous_buckets.index_end - index if span >= self._max_size: # pylint: disable=broad-exception-raised raise Exception("Incorrect merge scale") if span >= len(previous_buckets.counts): previous_buckets.grow(span + 1, self._max_size) previous_buckets.index_start = index if index > previous_buckets.index_end: span = index - previous_buckets.index_start if span >= self._max_size: # pylint: disable=broad-exception-raised raise Exception("Incorrect merge scale") if span >= len(previous_buckets.counts): previous_buckets.grow(span + 1, self._max_size) previous_buckets.index_end = index bucket_index = index - previous_buckets.index_base if bucket_index < 0: bucket_index += len(previous_buckets.counts) if aggregation_temporality is AggregationTemporality.DELTA: current_bucket = -current_bucket previous_buckets.increment_bucket( bucket_index, increment=current_bucket ) class Aggregation(ABC): """ Base class for all aggregation types. """ @abstractmethod def _create_aggregation( self, instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: """Creates an aggregation""" class DefaultAggregation(Aggregation): """ The default aggregation to be used in a `View`. This aggregation will create an actual aggregation depending on the instrument type, as specified next: ==================================================== ==================================== Instrument Aggregation ==================================================== ==================================== `opentelemetry.sdk.metrics.Counter` `SumAggregation` `opentelemetry.sdk.metrics.UpDownCounter` `SumAggregation` `opentelemetry.sdk.metrics.ObservableCounter` `SumAggregation` `opentelemetry.sdk.metrics.ObservableUpDownCounter` `SumAggregation` `opentelemetry.sdk.metrics.Histogram` `ExplicitBucketHistogramAggregation` `opentelemetry.sdk.metrics.ObservableGauge` `LastValueAggregation` ==================================================== ==================================== """ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: # pylint: disable=too-many-return-statements if isinstance(instrument, Counter): return _SumAggregation( attributes, reservoir_builder=reservoir_factory(_SumAggregation), instrument_is_monotonic=True, instrument_aggregation_temporality=( AggregationTemporality.DELTA ), start_time_unix_nano=start_time_unix_nano, ) if isinstance(instrument, UpDownCounter): return _SumAggregation( attributes, reservoir_builder=reservoir_factory(_SumAggregation), instrument_is_monotonic=False, instrument_aggregation_temporality=( AggregationTemporality.DELTA ), start_time_unix_nano=start_time_unix_nano, ) if isinstance(instrument, ObservableCounter): return _SumAggregation( attributes, reservoir_builder=reservoir_factory(_SumAggregation), instrument_is_monotonic=True, instrument_aggregation_temporality=( AggregationTemporality.CUMULATIVE ), start_time_unix_nano=start_time_unix_nano, ) if isinstance(instrument, ObservableUpDownCounter): return _SumAggregation( attributes, reservoir_builder=reservoir_factory(_SumAggregation), 
instrument_is_monotonic=False, instrument_aggregation_temporality=( AggregationTemporality.CUMULATIVE ), start_time_unix_nano=start_time_unix_nano, ) if isinstance(instrument, Histogram): boundaries = instrument._advisory.explicit_bucket_boundaries return _ExplicitBucketHistogramAggregation( attributes, reservoir_builder=reservoir_factory( _ExplicitBucketHistogramAggregation ), instrument_aggregation_temporality=( AggregationTemporality.DELTA ), boundaries=boundaries, start_time_unix_nano=start_time_unix_nano, ) if isinstance(instrument, ObservableGauge): return _LastValueAggregation( attributes, reservoir_builder=reservoir_factory(_LastValueAggregation), ) if isinstance(instrument, _Gauge): return _LastValueAggregation( attributes, reservoir_builder=reservoir_factory(_LastValueAggregation), ) # pylint: disable=broad-exception-raised raise Exception(f"Invalid instrument type {type(instrument)} found") class ExponentialBucketHistogramAggregation(Aggregation): def __init__( self, max_size: int = 160, max_scale: int = 20, ): self._max_size = max_size self._max_scale = max_scale def _create_aggregation( self, instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED if isinstance(instrument, Synchronous): instrument_aggregation_temporality = AggregationTemporality.DELTA elif isinstance(instrument, Asynchronous): instrument_aggregation_temporality = ( AggregationTemporality.CUMULATIVE ) return _ExponentialBucketHistogramAggregation( attributes, reservoir_factory(_ExponentialBucketHistogramAggregation), instrument_aggregation_temporality, start_time_unix_nano, max_size=self._max_size, max_scale=self._max_scale, ) class ExplicitBucketHistogramAggregation(Aggregation): """This aggregation informs the SDK to collect: - Count of Measurement values falling within explicit bucket boundaries. - Arithmetic sum of Measurement values in population. This SHOULD NOT be collected when used with instruments that record negative measurements, e.g. UpDownCounter or ObservableGauge. - Min (optional) Measurement value in population. - Max (optional) Measurement value in population. Args: boundaries: Array of increasing values representing explicit bucket boundary values. record_min_max: Whether to record min and max. 
""" def __init__( self, boundaries: Optional[Sequence[float]] = None, record_min_max: bool = True, ) -> None: self._boundaries = boundaries self._record_min_max = record_min_max def _create_aggregation( self, instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED if isinstance(instrument, Synchronous): instrument_aggregation_temporality = AggregationTemporality.DELTA elif isinstance(instrument, Asynchronous): instrument_aggregation_temporality = ( AggregationTemporality.CUMULATIVE ) if self._boundaries is not None: boundaries = self._boundaries else: boundaries = instrument._advisory.explicit_bucket_boundaries return _ExplicitBucketHistogramAggregation( attributes, instrument_aggregation_temporality, start_time_unix_nano, reservoir_factory(_ExplicitBucketHistogramAggregation), boundaries, self._record_min_max, ) class SumAggregation(Aggregation): """This aggregation informs the SDK to collect: - The arithmetic sum of Measurement values. """ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED if isinstance(instrument, Synchronous): instrument_aggregation_temporality = AggregationTemporality.DELTA elif isinstance(instrument, Asynchronous): instrument_aggregation_temporality = ( AggregationTemporality.CUMULATIVE ) return _SumAggregation( attributes, isinstance(instrument, (Counter, ObservableCounter)), instrument_aggregation_temporality, start_time_unix_nano, reservoir_factory(_SumAggregation), ) class LastValueAggregation(Aggregation): """ This aggregation informs the SDK to collect: - The last Measurement. - The timestamp of the last Measurement. """ def _create_aggregation( self, instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: return _LastValueAggregation( attributes, reservoir_builder=reservoir_factory(_LastValueAggregation), ) class DropAggregation(Aggregation): """Using this aggregation will make all measurements be ignored.""" def _create_aggregation( self, instrument: Instrument, attributes: Attributes, reservoir_factory: Callable[ [Type[_Aggregation]], ExemplarReservoirBuilder ], start_time_unix_nano: int, ) -> _Aggregation: return _DropAggregation( attributes, reservoir_factory(_DropAggregation) ) python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exceptions.py000066400000000000000000000012431511654350100341140ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
class MetricsTimeoutError(Exception): """Raised when a metrics function times out""" python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar/000077500000000000000000000000001511654350100331765ustar00rootroot00000000000000__init__.py000066400000000000000000000023021511654350100352250ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .exemplar import Exemplar from .exemplar_filter import ( AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, ExemplarFilter, TraceBasedExemplarFilter, ) from .exemplar_reservoir import ( AlignedHistogramBucketExemplarReservoir, ExemplarReservoir, ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir, ) __all__ = [ "Exemplar", "ExemplarFilter", "AlwaysOffExemplarFilter", "AlwaysOnExemplarFilter", "TraceBasedExemplarFilter", "AlignedHistogramBucketExemplarReservoir", "ExemplarReservoir", "ExemplarReservoirBuilder", "SimpleFixedSizeExemplarReservoir", ] exemplar.py000066400000000000000000000041001511654350100353010ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses from typing import Optional, Union from opentelemetry.util.types import Attributes @dataclasses.dataclass(frozen=True) class Exemplar: """A representation of an exemplar, which is a sample input measurement. Exemplars also hold information about the environment when the measurement was recorded, for example the span and trace ID of the active span when the exemplar was recorded. Attributes trace_id: (optional) The trace associated with a recording span_id: (optional) The span associated with a recording time_unix_nano: The time of the observation value: The recorded value filtered_attributes: A set of filtered attributes which provide additional insight into the Context when the observation was made. References: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exemplars https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar """ # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated # one will come from napoleon extension and the other from autodoc extension. 
This # will raise an sphinx error of duplicated object description # See https://github.com/sphinx-doc/sphinx/issues/8664 filtered_attributes: Attributes value: Union[int, float] time_unix_nano: int span_id: Optional[int] = None trace_id: Optional[int] = None exemplar_filter.py000066400000000000000000000111011511654350100366450ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from typing import Union from opentelemetry import trace from opentelemetry.context import Context from opentelemetry.trace.span import INVALID_SPAN from opentelemetry.util.types import Attributes class ExemplarFilter(ABC): """``ExemplarFilter`` determines which measurements are eligible for becoming an ``Exemplar``. Exemplar filters are used to filter measurements before attempting to store them in a reservoir. Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarfilter """ @abstractmethod def should_sample( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. Args: value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes context: The Context of the measurement """ raise NotImplementedError( "ExemplarFilter.should_sample is not implemented" ) class AlwaysOnExemplarFilter(ExemplarFilter): """An ExemplarFilter which makes all measurements eligible for being an Exemplar. Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwayson """ def should_sample( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. Args: value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes context: The Context of the measurement """ return True class AlwaysOffExemplarFilter(ExemplarFilter): """An ExemplarFilter which makes no measurements eligible for being an Exemplar. Using this ExemplarFilter is as good as disabling Exemplar feature. Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwaysoff """ def should_sample( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. 
Args: value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes context: The Context of the measurement """ return False class TraceBasedExemplarFilter(ExemplarFilter): """An ExemplarFilter which makes those measurements eligible for being an Exemplar, which are recorded in the context of a sampled parent span. Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#tracebased """ def should_sample( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> bool: """Returns whether or not a reservoir should attempt to filter a measurement. Args: value: The value of the measurement timestamp: A timestamp that best represents when the measurement was taken attributes: The complete set of measurement attributes context: The Context of the measurement """ span = trace.get_current_span(context) if span == INVALID_SPAN: return False return span.get_span_context().trace_flags.sampled exemplar_reservoir.py000066400000000000000000000246401511654350100374140ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exemplar# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from collections import defaultdict from random import randrange from typing import ( Any, Callable, Dict, List, Mapping, Optional, Sequence, Union, ) from opentelemetry import trace from opentelemetry.context import Context from opentelemetry.trace.span import INVALID_SPAN from opentelemetry.util.types import Attributes from .exemplar import Exemplar class ExemplarReservoir(ABC): """ExemplarReservoir provide a method to offer measurements to the reservoir and another to collect accumulated Exemplars. Note: The constructor MUST accept ``**kwargs`` that may be set from aggregation parameters. Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarreservoir """ @abstractmethod def offer( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> None: """Offers a measurement to be sampled. Args: value: Measured value time_unix_nano: Measurement instant attributes: Measurement attributes context: Measurement context """ raise NotImplementedError("ExemplarReservoir.offer is not implemented") @abstractmethod def collect(self, point_attributes: Attributes) -> List[Exemplar]: """Returns accumulated Exemplars and also resets the reservoir for the next sampling period Args: point_attributes: The attributes associated with metric point. Returns: a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned exemplars contain the attributes that were filtered out by the aggregator, but recorded alongside the original measurement. 
""" raise NotImplementedError( "ExemplarReservoir.collect is not implemented" ) class ExemplarBucket: def __init__(self) -> None: self.__value: Union[int, float] = 0 self.__attributes: Attributes = None self.__time_unix_nano: int = 0 self.__span_id: Optional[int] = None self.__trace_id: Optional[int] = None self.__offered: bool = False def offer( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> None: """Offers a measurement to be sampled. Args: value: Measured value time_unix_nano: Measurement instant attributes: Measurement attributes context: Measurement context """ self.__value = value self.__time_unix_nano = time_unix_nano self.__attributes = attributes span = trace.get_current_span(context) if span != INVALID_SPAN: span_context = span.get_span_context() self.__span_id = span_context.span_id self.__trace_id = span_context.trace_id self.__offered = True def collect(self, point_attributes: Attributes) -> Optional[Exemplar]: """May return an Exemplar and resets the bucket for the next sampling period.""" if not self.__offered: return None # filters out attributes from the measurement that are already included in the metric data point # See the specification for more details: # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar filtered_attributes = ( { k: v for k, v in self.__attributes.items() if k not in point_attributes } if self.__attributes else None ) exemplar = Exemplar( filtered_attributes, self.__value, self.__time_unix_nano, self.__span_id, self.__trace_id, ) self.__reset() return exemplar def __reset(self) -> None: """Reset the bucket state after a collection cycle.""" self.__value = 0 self.__attributes = {} self.__time_unix_nano = 0 self.__span_id = None self.__trace_id = None self.__offered = False class BucketIndexError(ValueError): """An exception raised when the bucket index cannot be found.""" class FixedSizeExemplarReservoirABC(ExemplarReservoir): """Abstract class for a reservoir with fixed size.""" def __init__(self, size: int, **kwargs) -> None: super().__init__(**kwargs) self._size: int = size self._reservoir_storage: Mapping[int, ExemplarBucket] = defaultdict( ExemplarBucket ) def collect(self, point_attributes: Attributes) -> List[Exemplar]: """Returns accumulated Exemplars and also resets the reservoir for the next sampling period Args: point_attributes: The attributes associated with metric point. Returns: a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned exemplars contain the attributes that were filtered out by the aggregator, but recorded alongside the original measurement. """ exemplars = [ e for e in ( bucket.collect(point_attributes) for _, bucket in sorted(self._reservoir_storage.items()) ) if e is not None ] self._reset() return exemplars def offer( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> None: """Offers a measurement to be sampled. 
Args: value: Measured value time_unix_nano: Measurement instant attributes: Measurement attributes context: Measurement context """ try: index = self._find_bucket_index( value, time_unix_nano, attributes, context ) self._reservoir_storage[index].offer( value, time_unix_nano, attributes, context ) except BucketIndexError: # Ignore invalid bucket index pass @abstractmethod def _find_bucket_index( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> int: """Determines the bucket index for the given measurement. It should be implemented by subclasses based on specific strategies. Args: value: Measured value time_unix_nano: Measurement instant attributes: Measurement attributes context: Measurement context Returns: The bucket index Raises: BucketIndexError: If no bucket index can be found. """ def _reset(self) -> None: """Reset the reservoir by resetting any stateful logic after a collection cycle.""" class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC): """This reservoir uses an uniformly-weighted sampling algorithm based on the number of samples the reservoir has seen so far to determine if the offered measurements should be sampled. Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir """ def __init__(self, size: int = 1, **kwargs) -> None: super().__init__(size, **kwargs) self._measurements_seen: int = 0 def _reset(self) -> None: super()._reset() self._measurements_seen = 0 def _find_bucket_index( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> int: self._measurements_seen += 1 if self._measurements_seen < self._size: return self._measurements_seen - 1 index = randrange(0, self._measurements_seen) if index < self._size: return index raise BucketIndexError("Unable to find the bucket index.") class AlignedHistogramBucketExemplarReservoir(FixedSizeExemplarReservoirABC): """This Exemplar reservoir takes a configuration parameter that is the configuration of a Histogram. This implementation keeps the last seen measurement that falls within a histogram bucket. Reference: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alignedhistogrambucketexemplarreservoir """ def __init__(self, boundaries: Sequence[float], **kwargs) -> None: super().__init__(len(boundaries) + 1, **kwargs) self._boundaries: Sequence[float] = boundaries def offer( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> None: """Offers a measurement to be sampled.""" index = self._find_bucket_index( value, time_unix_nano, attributes, context ) self._reservoir_storage[index].offer( value, time_unix_nano, attributes, context ) def _find_bucket_index( self, value: Union[int, float], time_unix_nano: int, attributes: Attributes, context: Context, ) -> int: for index, boundary in enumerate(self._boundaries): if value <= boundary: return index return len(self._boundaries) ExemplarReservoirBuilder = Callable[[Dict[str, Any]], ExemplarReservoir] ExemplarReservoirBuilder.__doc__ = """ExemplarReservoir builder. It may receive the Aggregation parameters it is bounded to; e.g. the _ExplicitBucketHistogramAggregation will provide the boundaries. 
""" exponential_histogram/000077500000000000000000000000001511654350100357055ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal__init__.py000066400000000000000000000000001511654350100400040ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogrambuckets.py000066400000000000000000000134671511654350100377320ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from math import ceil, log2 class Buckets: # No method of this class is protected by locks because instances of this # class are only used in methods that are protected by locks themselves. def __init__(self): self._counts = [0] # The term index refers to the number of the exponential histogram bucket # used to determine its boundaries. The lower boundary of a bucket is # determined by base ** index and the upper boundary of a bucket is # determined by base ** (index + 1). index values are signedto account # for values less than or equal to 1. # self._index_* will all have values equal to a certain index that is # determined by the corresponding mapping _map_to_index function and # the value of the index depends on the value passed to _map_to_index. # Index of the 0th position in self._counts: self._counts[0] is the # count in the bucket with index self.__index_base. self.__index_base = 0 # self.__index_start is the smallest index value represented in # self._counts. self.__index_start = 0 # self.__index_start is the largest index value represented in # self._counts. 
        self.__index_end = 0

    @property
    def index_start(self) -> int:
        return self.__index_start

    @index_start.setter
    def index_start(self, value: int) -> None:
        self.__index_start = value

    @property
    def index_end(self) -> int:
        return self.__index_end

    @index_end.setter
    def index_end(self, value: int) -> None:
        self.__index_end = value

    @property
    def index_base(self) -> int:
        return self.__index_base

    @index_base.setter
    def index_base(self, value: int) -> None:
        self.__index_base = value

    @property
    def counts(self):
        return self._counts

    def get_offset_counts(self):
        bias = self.__index_base - self.__index_start
        return self._counts[-bias:] + self._counts[:-bias]

    def grow(self, needed: int, max_size: int) -> None:
        size = len(self._counts)
        bias = self.__index_base - self.__index_start
        old_positive_limit = size - bias

        # 2 ** ceil(log2(needed)) finds the smallest power of two that is
        # larger than or equal to needed:
        # 2 ** ceil(log2(1)) == 1
        # 2 ** ceil(log2(2)) == 2
        # 2 ** ceil(log2(3)) == 4
        # 2 ** ceil(log2(4)) == 4
        # 2 ** ceil(log2(5)) == 8
        # 2 ** ceil(log2(6)) == 8
        # 2 ** ceil(log2(7)) == 8
        # 2 ** ceil(log2(8)) == 8
        new_size = min(2 ** ceil(log2(needed)), max_size)

        new_positive_limit = new_size - bias

        tmp = [0] * new_size
        tmp[new_positive_limit:] = self._counts[old_positive_limit:]
        tmp[0:old_positive_limit] = self._counts[0:old_positive_limit]
        self._counts = tmp

    @property
    def offset(self) -> int:
        return self.__index_start

    def __len__(self) -> int:
        if len(self._counts) == 0:
            return 0

        if self.__index_end == self.__index_start and self[0] == 0:
            return 0

        return self.__index_end - self.__index_start + 1

    def __getitem__(self, key: int) -> int:
        bias = self.__index_base - self.__index_start

        if key < bias:
            key += len(self._counts)

        key -= bias

        return self._counts[key]

    def downscale(self, amount: int) -> None:
        """
        Rotates, then collapses 2 ** amount buckets into 1.
        """

        bias = self.__index_base - self.__index_start

        if bias != 0:
            self.__index_base = self.__index_start

            # [0, 1, 2, 3, 4] Original backing array

            self._counts = self._counts[::-1]
            # [4, 3, 2, 1, 0]

            self._counts = (
                self._counts[:bias][::-1] + self._counts[bias:][::-1]
            )
            # [3, 4, 0, 1, 2] This is a rotation of the backing array.
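        # Worked example (illustrative) of the collapse below: downscale(1)
        # halves the scale, so old indexes 2, 3, 4 and 5 map to new indexes
        # 1, 1, 2 and 2 (index >> 1), and counts [a, b, c, d] for indexes
        # 2..5 become [a + b, c + d] for indexes 1..2.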
size = 1 + self.__index_end - self.__index_start each = 1 << amount inpos = 0 outpos = 0 pos = self.__index_start while pos <= self.__index_end: mod = pos % each if mod < 0: mod += each index = mod while index < each and inpos < size: if outpos != inpos: self._counts[outpos] += self._counts[inpos] self._counts[inpos] = 0 inpos += 1 pos += 1 index += 1 outpos += 1 self.__index_start >>= amount self.__index_end >>= amount self.__index_base = self.__index_start def increment_bucket(self, bucket_index: int, increment: int = 1) -> None: self._counts[bucket_index] += increment def copy_empty(self) -> "Buckets": copy = Buckets() # pylint: disable=no-member # pylint: disable=protected-access # pylint: disable=attribute-defined-outside-init # pylint: disable=invalid-name copy._Buckets__index_base = self._Buckets__index_base copy._Buckets__index_start = self._Buckets__index_start copy._Buckets__index_end = self._Buckets__index_end copy._counts = [0 for _ in self._counts] return copy mapping/000077500000000000000000000000001511654350100373405ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram__init__.py000066400000000000000000000074231511654350100414570ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod class Mapping(ABC): """ Parent class for `LogarithmMapping` and `ExponentialMapping`. """ # pylint: disable=no-member def __new__(cls, scale: int): with cls._mappings_lock: # cls._mappings and cls._mappings_lock are implemented in each of # the child classes as a dictionary and a lock, respectively. They # are not instantiated here because that would lead to both child # classes having the same instance of cls._mappings and # cls._mappings_lock. if scale not in cls._mappings: cls._mappings[scale] = super().__new__(cls) cls._mappings[scale]._init(scale) return cls._mappings[scale] @abstractmethod def _init(self, scale: int) -> None: # pylint: disable=attribute-defined-outside-init if scale > self._get_max_scale(): # pylint: disable=broad-exception-raised raise Exception(f"scale is larger than {self._max_scale}") if scale < self._get_min_scale(): # pylint: disable=broad-exception-raised raise Exception(f"scale is smaller than {self._min_scale}") # The size of the exponential histogram buckets is determined by a # parameter known as scale, larger values of scale will produce smaller # buckets. 
Bucket boundaries of the exponential histogram are located # at integer powers of the base, where: # # base = 2 ** (2 ** (-scale)) # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#all-scales-use-the-logarithm-function self._scale = scale @abstractmethod def _get_min_scale(self) -> int: """ Return the smallest possible value for the mapping scale """ @abstractmethod def _get_max_scale(self) -> int: """ Return the largest possible value for the mapping scale """ @abstractmethod def map_to_index(self, value: float) -> int: """ Maps positive floating point values to indexes corresponding to `Mapping.scale`. Implementations are not expected to handle zeros, +inf, NaN, or negative values. """ @abstractmethod def get_lower_boundary(self, index: int) -> float: """ Returns the lower boundary of a given bucket index. The index is expected to map onto a range that is at least partially inside the range of normal floating point values. If the corresponding bucket's upper boundary is less than or equal to 2 ** -1022, :class:`~opentelemetry.sdk.metrics.MappingUnderflowError` will be raised. If the corresponding bucket's lower boundary is greater than ``sys.float_info.max``, :class:`~opentelemetry.sdk.metrics.MappingOverflowError` will be raised. """ @property def scale(self) -> int: """ Returns the parameter that controls the resolution of this mapping. See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/datamodel.md#exponential-scale """ return self._scale errors.py000066400000000000000000000015661511654350100412360ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class MappingUnderflowError(Exception): """ Raised when computing the lower boundary of an index that maps into a denormal floating point value. """ class MappingOverflowError(Exception): """ Raised when computing the lower boundary of an index that maps into +inf. """ exponent_mapping.py000066400000000000000000000137621511654350100432760ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
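# Usage sketch of the ExponentMapping defined below (the values were
# worked out from the mapping formula by hand): at scale -2 the base is
# 2 ** (2 ** 2) == 16, so, for example:
#
#     mapping = ExponentMapping(-2)
#     mapping.map_to_index(100.0)  # returns 1, i.e. the bucket (16, 256]
#     mapping.get_lower_boundary(1)  # returns 16.0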
from math import ldexp from threading import Lock from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import ( Mapping, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import ( MappingOverflowError, MappingUnderflowError, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import ( MANTISSA_WIDTH, MAX_NORMAL_EXPONENT, MIN_NORMAL_EXPONENT, MIN_NORMAL_VALUE, get_ieee_754_exponent, get_ieee_754_mantissa, ) class ExponentMapping(Mapping): # Reference implementation here: # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/exponent/exponent.go _mappings = {} _mappings_lock = Lock() _min_scale = -10 _max_scale = 0 def _get_min_scale(self): # _min_scale defines the point at which the exponential mapping # function becomes useless for 64-bit floats. With scale -10, ignoring # subnormal values, bucket indices range from -1 to 1. return -10 def _get_max_scale(self): # _max_scale is the largest scale supported by exponential mapping. Use # a logarithm mapping for larger scales. return 0 def _init(self, scale: int): # pylint: disable=attribute-defined-outside-init super()._init(scale) # self._min_normal_lower_boundary_index is the largest index such that # base ** index < MIN_NORMAL_VALUE and # base ** (index + 1) >= MIN_NORMAL_VALUE. An exponential histogram # bucket with this index covers the range # (base ** index, base ** (index + 1)], including MIN_NORMAL_VALUE. This # is the smallest valid index that contains at least one normal value. index = MIN_NORMAL_EXPONENT >> -self._scale if -self._scale < 2: # For scales -1 and 0, the minimum normal value 2 ** -1022 is a # power-of-two multiple, meaning base ** index == MIN_NORMAL_VALUE. # Subtracting 1 so that base ** (index + 1) == MIN_NORMAL_VALUE. index -= 1 self._min_normal_lower_boundary_index = index # self._max_normal_lower_boundary_index is the index such that # base ** index equals the greatest representable lower boundary. An # exponential histogram bucket with this index covers the range # ((2 ** 1024) / base, 2 ** 1024], which includes opentelemetry.sdk. # metrics._internal.exponential_histogram.ieee_754.MAX_NORMAL_VALUE. # This bucket is incomplete, since the upper boundary cannot be # represented. One greater than this index corresponds with the bucket # containing values > 2 ** 1024. self._max_normal_lower_boundary_index = ( MAX_NORMAL_EXPONENT >> -self._scale ) def map_to_index(self, value: float) -> int: if value < MIN_NORMAL_VALUE: return self._min_normal_lower_boundary_index exponent = get_ieee_754_exponent(value) # Positive integers are represented in binary as having an infinite # number of leading zeroes, for example 2 is represented as ...00010. # A negative integer -x is represented in binary as the complement of # (x - 1). For example, -4 is represented as the complement of 4 - 1 # == 3. 3 is represented as ...00011. Its complement is ...11100, the # binary representation of -4. # get_ieee_754_mantissa(value) gets the positive integer made up # from the rightmost MANTISSA_WIDTH bits (the mantissa) of the IEEE # 754 representation of value. If value is an exact power of 2, all # these MANTISSA_WIDTH bits would be zeroes, and when 1 is # subtracted the resulting value is -1. The binary representation of # -1 is ...111, so when these bits are right shifted MANTISSA_WIDTH # places, the resulting value for correction is -1.
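# For example, for value == 8.0 the mantissa bits are all zeroes, so
# correction == (0 - 1) >> MANTISSA_WIDTH == -1 (Python's right shift
# is arithmetic, so -1 stays -1). With exponent == 3 and scale 0 the
# result is (3 - 1) >> 0 == 2, the bucket (4, 8] that contains 8.0.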
If value is not an # exact power of 2, at least one of the rightmost MANTISSA_WIDTH # bits would be 1 (even for values whose fractional part is 0, like 5.0, # since the IEEE 754 representation of such a number is also the product # of a power of 2 (defined in the exponent part of the IEEE 754 # representation) and the value defined in the mantissa). Having at # least one of the rightmost MANTISSA_WIDTH bits set to 1 means that # get_ieee_754_mantissa(value) will always be greater than or equal to # 1, and when 1 is subtracted, the result will be greater than or equal # to 0, whose representation in binary will be of at most # MANTISSA_WIDTH ones, with an infinite number of leading zeroes. When # those MANTISSA_WIDTH bits are shifted to the right MANTISSA_WIDTH # places, the resulting value will be 0. # In summary, correction will be -1 if value is a power of 2, 0 if not. # FIXME Document why we can assume value will not be 0, inf, or NaN. correction = (get_ieee_754_mantissa(value) - 1) >> MANTISSA_WIDTH return (exponent + correction) >> -self._scale def get_lower_boundary(self, index: int) -> float: if index < self._min_normal_lower_boundary_index: raise MappingUnderflowError() if index > self._max_normal_lower_boundary_index: raise MappingOverflowError() return ldexp(1, index << -self._scale) @property def scale(self) -> int: return self._scale ieee_754.md000066400000000000000000000115641511654350100411770ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping# IEEE 754 Explained IEEE 754 is a standard that defines a way to represent certain mathematical objects using binary numbers. ## Binary Number Fields The binary numbers used in IEEE 754 can have different lengths; the length that is interesting for the purposes of this project is 64 bits. These binary numbers are made up of 3 contiguous fields of bits, from left to right: 1. 1 sign bit 2. 11 exponent bits 3. 52 mantissa bits Depending on the values these fields have, the represented mathematical object can be one of: * Floating point number * Zero * NaN * Infinite ## Floating Point Numbers IEEE 754 represents a floating point number $f$ using an exponential notation with 4 components: $sign$, $mantissa$, $base$ and $exponent$: $$f = sign \times mantissa \times base ^ {exponent}$$ There are two possible representations of floating point numbers: _normal_ and _denormal_, which have different valid values for their $mantissa$ and $exponent$ fields. ### Binary Representation $sign$, $mantissa$, and $exponent$ are represented in binary; the representation of each component has certain details, explained next. $base$ is always $2$ and it is not represented in binary. #### Sign $sign$ can have 2 values: 1. $1$ if the `sign` bit is `0` 2. $-1$ if the `sign` bit is `1`. #### Mantissa ##### Normal Floating Point Numbers $mantissa$ is a positive fractional number whose integer part is $1$, for example $1.2345 \dots$. The `mantissa` bits represent only the fractional part and the $mantissa$ value can be calculated as: $$mantissa = 1 + \sum_{i=1}^{52} b_{i} \times 2^{-i} = 1 + \frac{b_{1}}{2^{1}} + \frac{b_{2}}{2^{2}} + \dots + \frac{b_{51}}{2^{51}} + \frac{b_{52}}{2^{52}}$$ Where $b_{i}$ is: 1. $0$ if the bit at the position `i - 1` is `0`. 2. $1$ if the bit at the position `i - 1` is `1`. ##### Denormal Floating Point Numbers $mantissa$ is a positive fractional number whose integer part is $0$, for example $0.12345 \dots$.
The `mantissa` bits represent only the fractional part and the $mantissa$ value can be calculated as: $$mantissa = \sum_{i=1}^{52} b_{i} \times 2^{-i} = \frac{b_{1}}{2^{1}} + \frac{b_{2}}{2^{2}} + \dots + \frac{b_{51}}{2^{51}} + \frac{b_{52}}{2^{52}}$$ Where $b_{i}$ is: 1. $0$ if the bit at the position `i - 1` is `0`. 2. $1$ if the bit at the position `i - 1` is `1`. #### Exponent ##### Normal Floating Point Numbers Only the following bit sequences are allowed: `00000000001` to `11111111110`. That is, there must be at least one `0` and one `1` in the exponent bits. The actual value of the $exponent$ can be calculated as: $$exponent = v - bias$$ where $v$ is the value of the binary number in the exponent bits and $bias$ is $1023$. Considering the restrictions above, the respective minimum and maximum values for the exponent are: 1. `00000000001` = $1$, $1 - 1023 = -1022$ 2. `11111111110` = $2046$, $2046 - 1023 = 1023$ So, $exponent$ is an integer in the range $\left[-1022, 1023\right]$. ##### Denormal Floating Point Numbers $exponent$ is always $-1022$. Nevertheless, it is always represented as `00000000000`. ### Normal and Denormal Floating Point Numbers The smallest absolute value a normal floating point number can have is calculated like this: $$1 \times 1.0\dots0 \times 2^{-1022} = 2.2250738585072014 \times 10^{-308}$$ Since normal floating point numbers always have a $1$ as the integer part of the $mantissa$, then smaller values can be achieved by using the smallest possible exponent ( $-1022$ ) and a $0$ in the integer part of the $mantissa$, but significant digits are lost. The smallest absolute value a denormal floating point number can have is calculated like this: $$1 \times 2^{-52} \times 2^{-1022} = 5 \times 10^{-324}$$ ## Zero Zero is represented like this: * Sign bit: `X` * Exponent bits: `00000000000` * Mantissa bits: `0000000000000000000000000000000000000000000000000000` where `X` means `0` or `1`. ## NaN There are 2 kinds of NaNs that are represented: 1. QNaNs (Quiet NaNs): represent the result of indeterminate operations. 2. SNaNs (Signalling NaNs): represent the result of invalid operations. ### QNaNs QNaNs are represented like this: * Sign bit: `X` * Exponent bits: `11111111111` * Mantissa bits: `1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX` where `X` means `0` or `1`. ### SNaNs SNaNs are represented like this: * Sign bit: `X` * Exponent bits: `11111111111` * Mantissa bits: `0XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX1` where `X` means `0` or `1`. ## Infinite ### Positive Infinite Positive infinite is represented like this: * Sign bit: `0` * Exponent bits: `11111111111` * Mantissa bits: `0000000000000000000000000000000000000000000000000000` where `X` means `0` or `1`. ### Negative Infinite Negative infinite is represented like this: * Sign bit: `1` * Exponent bits: `11111111111` * Mantissa bits: `0000000000000000000000000000000000000000000000000000` where `X` means `0` or `1`. ieee_754.py000066400000000000000000000125521511654350100412250ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ctypes import c_double, c_uint64 from sys import float_info # IEEE 754 64-bit floating point numbers use 11 bits for the exponent and 52 # bits for the mantissa. MANTISSA_WIDTH = 52 EXPONENT_WIDTH = 11 # This mask is equivalent to 52 "1" bits (there are 13 hexadecimal 4-bit "f"s # in the mantissa mask, 13 * 4 == 52) or 0xfffffffffffff in hexadecimal. MANTISSA_MASK = (1 << MANTISSA_WIDTH) - 1 # There are 11 bits for the exponent, but the exponent values 0 (11 "0" # bits) and 2047 (11 "1" bits) have special meanings so the exponent range is # from 1 to 2046. To calculate the exponent value, 1023 (the bias) is # subtracted from the exponent, so the exponent value range is from -1022 to # +1023. EXPONENT_BIAS = (2 ** (EXPONENT_WIDTH - 1)) - 1 # All the exponent mask bits are set to 1 for the 11 exponent bits. EXPONENT_MASK = ((1 << EXPONENT_WIDTH) - 1) << MANTISSA_WIDTH # The sign mask has the first bit set to 1 and the rest to 0. SIGN_MASK = 1 << (EXPONENT_WIDTH + MANTISSA_WIDTH) # For normal floating point numbers, the exponent can have a value in the # range [-1022, 1023]. MIN_NORMAL_EXPONENT = -EXPONENT_BIAS + 1 MAX_NORMAL_EXPONENT = EXPONENT_BIAS # The smallest possible normal value is 2.2250738585072014e-308. # This value is the result of using the smallest possible number in the # mantissa, 1.0000000000000000000000000000000000000000000000000000 (52 "0"s in # the fractional part) and a single "1" in the exponent. # Finally 1 * (2 ** -1022) = 2.2250738585072014e-308. MIN_NORMAL_VALUE = float_info.min # Greatest possible normal value (1.7976931348623157e+308) # The binary representation of a float in scientific notation uses (for the # mantissa) one bit for the integer part (which is implicit) and 52 bits for # the fractional part. Consider a float binary 1.111. It is equal to 1 + 1/2 + # 1/4 + 1/8. The greatest possible value in the 52-bit binary mantissa would be # then 1.1111111111111111111111111111111111111111111111111111 (52 "1"s in the # fractional part) whose decimal value is 1.9999999999999998. Finally, # 1.9999999999999998 * (2 ** 1023) = 1.7976931348623157e+308. MAX_NORMAL_VALUE = float_info.max def get_ieee_754_exponent(value: float) -> int: """ Gets the exponent of the IEEE 754 representation of a float. """ return ( ( # This step gives the integer that corresponds to the IEEE 754 # representation of a float. For example, consider # -MAX_NORMAL_VALUE for an example. We choose this value because # of its binary representation which makes easy to understand the # subsequent operations. # # c_uint64.from_buffer(c_double(-MAX_NORMAL_VALUE)).value == 18442240474082181119 # bin(18442240474082181119) == '0b1111111111101111111111111111111111111111111111111111111111111111' # # The first bit of the previous binary number is the sign bit: 1 (1 means negative, 0 means positive) # The next 11 bits are the exponent bits: 11111111110 # The next 52 bits are the mantissa bits: 1111111111111111111111111111111111111111111111111111 # # This step isolates the exponent bits, turning every bit outside # of the exponent field (sign and mantissa bits) to 0. 
c_uint64.from_buffer(c_double(value)).value & EXPONENT_MASK # For the example this means: # 18442240474082181119 & EXPONENT_MASK == 9214364837600034816 # bin(9214364837600034816) == '0b111111111100000000000000000000000000000000000000000000000000000' # Notice that the previous binary representation does not include # leading zeroes, so the sign bit is not included since it is a # zero. ) # This step moves the exponent bits to the right, removing the # mantissa bits that were set to 0 by the previous step. This # leaves the IEEE 754 exponent value, ready for the next step. >> MANTISSA_WIDTH # For the example this means: # 9214364837600034816 >> MANTISSA_WIDTH == 2046 # bin(2046) == '0b11111111110' # As shown above, these are the original 11 bits that correspond to the # exponent. # This step subtracts the exponent bias from the IEEE 754 value, # leaving the actual exponent value. ) - EXPONENT_BIAS # For the example this means: # 2046 - EXPONENT_BIAS == 1023 # As mentioned in a comment above, the largest value for the exponent is # 1023. def get_ieee_754_mantissa(value: float) -> int: return ( c_uint64.from_buffer(c_double(value)).value # This step isolates the mantissa bits. There is no need to do any # bit shifting as the mantissa bits are already the rightmost field # in an IEEE 754 representation. & MANTISSA_MASK ) logarithm_mapping.py000066400000000000000000000133101511654350100434110ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/mapping# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from math import exp, floor, ldexp, log from threading import Lock from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import ( Mapping, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import ( MappingOverflowError, MappingUnderflowError, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import ( MAX_NORMAL_EXPONENT, MIN_NORMAL_EXPONENT, MIN_NORMAL_VALUE, get_ieee_754_exponent, get_ieee_754_mantissa, ) class LogarithmMapping(Mapping): # Reference implementation here: # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go _mappings = {} _mappings_lock = Lock() _min_scale = 1 _max_scale = 20 def _get_min_scale(self): # _min_scale ensures that ExponentMapping is used for zero and negative # scale values.
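# At scale 1 the base is 2 ** (2 ** -1) == sqrt(2), the coarsest
# bucket resolution handled by this logarithm-based mapping.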
return self._min_scale def _get_max_scale(self): # FIXME The Go implementation uses a value of 20 here, find out the # right value for this implementation, more information here: # https://github.com/lightstep/otel-launcher-go/blob/c9ca8483be067a39ab306b09060446e7fda65f35/lightstep/sdk/metric/aggregator/histogram/structure/README.md#mapping-function # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go#L32-L45 return self._max_scale def _init(self, scale: int): # pylint: disable=attribute-defined-outside-init super()._init(scale) # self._scale_factor is defined as a multiplier because multiplication # is faster than division. self._scale_factor is defined as: # index = log(value) * self._scale_factor # Where: # index = log(value) / log(base) # index = log(value) / log(2 ** (2 ** -scale)) # index = log(value) / ((2 ** -scale) * log(2)) # index = log(value) * ((1 / log(2)) * (2 ** scale)) # self._scale_factor = ((1 / log(2)) * (2 ** scale)) # self._scale_factor = (1 /log(2)) * (2 ** scale) # self._scale_factor = ldexp(1 / log(2), scale) # This implementation was copied from a Java prototype. See: # https://github.com/newrelic-experimental/newrelic-sketch-java/blob/1ce245713603d61ba3a4510f6df930a5479cd3f6/src/main/java/com/newrelic/nrsketch/indexer/LogIndexer.java # for the equations used here. self._scale_factor = ldexp(1 / log(2), scale) # self._min_normal_lower_boundary_index is the index such that # base ** index == MIN_NORMAL_VALUE. An exponential histogram bucket # with this index covers the range # (MIN_NORMAL_VALUE, MIN_NORMAL_VALUE * base]. One less than this index # corresponds with the bucket containing values <= MIN_NORMAL_VALUE. self._min_normal_lower_boundary_index = ( MIN_NORMAL_EXPONENT << self._scale ) # self._max_normal_lower_boundary_index is the index such that # base ** index equals the greatest representable lower boundary. An # exponential histogram bucket with this index covers the range # ((2 ** 1024) / base, 2 ** 1024], which includes opentelemetry.sdk. # metrics._internal.exponential_histogram.ieee_754.MAX_NORMAL_VALUE. # This bucket is incomplete, since the upper boundary cannot be # represented. One greater than this index corresponds with the bucket # containing values > 2 ** 1024. self._max_normal_lower_boundary_index = ( (MAX_NORMAL_EXPONENT + 1) << self._scale ) - 1 def map_to_index(self, value: float) -> int: """ Maps positive floating point values to indexes corresponding to scale. """ # value is subnormal if value <= MIN_NORMAL_VALUE: return self._min_normal_lower_boundary_index - 1 # value is an exact power of two. 
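# Such a value is exactly the lower boundary of the bucket at index
# exponent << self._scale; since buckets are lower-exclusive,
# (base ** index, base ** (index + 1)], the value belongs to the
# bucket one below, hence the subtraction of 1 below.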
if get_ieee_754_mantissa(value) == 0: exponent = get_ieee_754_exponent(value) return (exponent << self._scale) - 1 return min( floor(log(value) * self._scale_factor), self._max_normal_lower_boundary_index, ) def get_lower_boundary(self, index: int) -> float: if index >= self._max_normal_lower_boundary_index: if index == self._max_normal_lower_boundary_index: return 2 * exp( (index - (1 << self._scale)) / self._scale_factor ) raise MappingOverflowError() if index <= self._min_normal_lower_boundary_index: if index == self._min_normal_lower_boundary_index: return MIN_NORMAL_VALUE if index == self._min_normal_lower_boundary_index - 1: return ( exp((index + (1 << self._scale)) / self._scale_factor) / 2 ) raise MappingUnderflowError() return exp(index / self._scale_factor) @property def scale(self) -> int: return self._scale python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/export/000077500000000000000000000000001511654350100327025ustar00rootroot00000000000000__init__.py000066400000000000000000000512731511654350100347440ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/export# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import math import os import weakref from abc import ABC, abstractmethod from enum import Enum from logging import getLogger from os import environ, linesep from sys import stdout from threading import Event, Lock, RLock, Thread from time import time_ns from typing import IO, Callable, Iterable, Optional from typing_extensions import final # This kind of import is needed to avoid Sphinx errors. import opentelemetry.sdk.metrics._internal from opentelemetry.context import ( _SUPPRESS_INSTRUMENTATION_KEY, attach, detach, set_value, ) from opentelemetry.sdk.environment_variables import ( OTEL_METRIC_EXPORT_INTERVAL, OTEL_METRIC_EXPORT_TIMEOUT, ) from opentelemetry.sdk.metrics._internal.aggregation import ( AggregationTemporality, DefaultAggregation, ) from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError from opentelemetry.sdk.metrics._internal.instrument import ( Counter, Gauge, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, _Counter, _Gauge, _Histogram, _ObservableCounter, _ObservableGauge, _ObservableUpDownCounter, _UpDownCounter, ) from opentelemetry.sdk.metrics._internal.point import MetricsData from opentelemetry.util._once import Once _logger = getLogger(__name__) class MetricExportResult(Enum): """Result of exporting a metric Can be any of the following values:""" SUCCESS = 0 FAILURE = 1 class MetricExporter(ABC): """Interface for exporting metrics. Interface to be implemented by services that want to export metrics received in their own format. Args: preferred_temporality: Used by `opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader` to configure exporter level preferred temporality. 
See `opentelemetry.sdk.metrics.export.MetricReader` for more details on what preferred temporality is. preferred_aggregation: Used by `opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader` to configure exporter level preferred aggregation. See `opentelemetry.sdk.metrics.export.MetricReader` for more details on what preferred aggregation is. """ def __init__( self, preferred_temporality: dict[type, AggregationTemporality] | None = None, preferred_aggregation: dict[ type, "opentelemetry.sdk.metrics.view.Aggregation" ] | None = None, ) -> None: self._preferred_temporality = preferred_temporality self._preferred_aggregation = preferred_aggregation @abstractmethod def export( self, metrics_data: MetricsData, timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: """Exports a batch of telemetry data. Args: metrics: The list of `opentelemetry.sdk.metrics.export.Metric` objects to be exported Returns: The result of the export """ @abstractmethod def force_flush(self, timeout_millis: float = 10_000) -> bool: """ Ensure that export of any metrics currently received by the exporter are completed as soon as possible. """ @abstractmethod def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: """Shuts down the exporter. Called when the SDK is shut down. """ class ConsoleMetricExporter(MetricExporter): """Implementation of :class:`MetricExporter` that prints metrics to the console. This class can be used for diagnostic purposes. It prints the exported metrics to the console STDOUT. """ def __init__( self, out: IO = stdout, formatter: Callable[ [MetricsData], str ] = lambda metrics_data: metrics_data.to_json() + linesep, preferred_temporality: dict[type, AggregationTemporality] | None = None, preferred_aggregation: dict[ type, "opentelemetry.sdk.metrics.view.Aggregation" ] | None = None, ): super().__init__( preferred_temporality=preferred_temporality, preferred_aggregation=preferred_aggregation, ) self.out = out self.formatter = formatter def export( self, metrics_data: MetricsData, timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: self.out.write(self.formatter(metrics_data)) self.out.flush() return MetricExportResult.SUCCESS def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: pass def force_flush(self, timeout_millis: float = 10_000) -> bool: return True class MetricReader(ABC): # pylint: disable=too-many-branches,broad-exception-raised """ Base class for all metric readers Args: preferred_temporality: A mapping between instrument classes and aggregation temporality. By default uses CUMULATIVE for all instrument classes. This mapping will be used to define the default aggregation temporality of every instrument class. If the user wants to make a change in the default aggregation temporality of an instrument class, it is enough to pass here a dictionary whose keys are the instrument classes and the values are the corresponding desired aggregation temporalities of the classes that the user wants to change, not all of them. The classes not included in the passed dictionary will retain their association to their default aggregation temporalities. preferred_aggregation: A mapping between instrument classes and aggregation instances. By default maps all instrument classes to an instance of `DefaultAggregation`. This mapping will be used to define the default aggregation of every instrument class. 
If the user wants to make a change in the default aggregation of an instrument class, it is enough to pass here a dictionary whose keys are the instrument classes and the values are the corresponding desired aggregation for the instrument classes that the user wants to change, not necessarily all of them. The classes not included in the passed dictionary will retain their association to their default aggregations. The aggregation defined here will be overridden by an aggregation defined by a view that is not `DefaultAggregation`. .. document protected _receive_metrics which is a intended to be overridden by subclass .. automethod:: _receive_metrics """ def __init__( self, preferred_temporality: dict[type, AggregationTemporality] | None = None, preferred_aggregation: dict[ type, "opentelemetry.sdk.metrics.view.Aggregation" ] | None = None, ) -> None: self._collect: Callable[ [ "opentelemetry.sdk.metrics.export.MetricReader", AggregationTemporality, ], Iterable["opentelemetry.sdk.metrics.export.Metric"], ] = None self._instrument_class_temporality = { _Counter: AggregationTemporality.CUMULATIVE, _UpDownCounter: AggregationTemporality.CUMULATIVE, _Histogram: AggregationTemporality.CUMULATIVE, _Gauge: AggregationTemporality.CUMULATIVE, _ObservableCounter: AggregationTemporality.CUMULATIVE, _ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, _ObservableGauge: AggregationTemporality.CUMULATIVE, } if preferred_temporality is not None: for temporality in preferred_temporality.values(): if temporality not in ( AggregationTemporality.CUMULATIVE, AggregationTemporality.DELTA, ): raise Exception( f"Invalid temporality value found {temporality}" ) if preferred_temporality is not None: for typ, temporality in preferred_temporality.items(): if typ is Counter: self._instrument_class_temporality[_Counter] = temporality elif typ is UpDownCounter: self._instrument_class_temporality[_UpDownCounter] = ( temporality ) elif typ is Histogram: self._instrument_class_temporality[_Histogram] = ( temporality ) elif typ is Gauge: self._instrument_class_temporality[_Gauge] = temporality elif typ is ObservableCounter: self._instrument_class_temporality[_ObservableCounter] = ( temporality ) elif typ is ObservableUpDownCounter: self._instrument_class_temporality[ _ObservableUpDownCounter ] = temporality elif typ is ObservableGauge: self._instrument_class_temporality[_ObservableGauge] = ( temporality ) else: raise Exception(f"Invalid instrument class found {typ}") self._preferred_temporality = preferred_temporality self._instrument_class_aggregation = { _Counter: DefaultAggregation(), _UpDownCounter: DefaultAggregation(), _Histogram: DefaultAggregation(), _Gauge: DefaultAggregation(), _ObservableCounter: DefaultAggregation(), _ObservableUpDownCounter: DefaultAggregation(), _ObservableGauge: DefaultAggregation(), } if preferred_aggregation is not None: for typ, aggregation in preferred_aggregation.items(): if typ is Counter: self._instrument_class_aggregation[_Counter] = aggregation elif typ is UpDownCounter: self._instrument_class_aggregation[_UpDownCounter] = ( aggregation ) elif typ is Histogram: self._instrument_class_aggregation[_Histogram] = ( aggregation ) elif typ is Gauge: self._instrument_class_aggregation[_Gauge] = aggregation elif typ is ObservableCounter: self._instrument_class_aggregation[_ObservableCounter] = ( aggregation ) elif typ is ObservableUpDownCounter: self._instrument_class_aggregation[ _ObservableUpDownCounter ] = aggregation elif typ is ObservableGauge: 
self._instrument_class_aggregation[_ObservableGauge] = ( aggregation ) else: raise Exception(f"Invalid instrument class found {typ}") @final def collect(self, timeout_millis: float = 10_000) -> None: """Collects the metrics from the internal SDK state and invokes the `_receive_metrics` with the collection. Args: timeout_millis: Amount of time in milliseconds before this function raises a timeout error. If any of the underlying ``collect`` methods called by this method fails by any reason (including timeout) an exception will be raised detailing the individual errors that caused this function to fail. """ if self._collect is None: _logger.warning( "Cannot call collect on a MetricReader until it is registered on a MeterProvider" ) return metrics = self._collect(self, timeout_millis=timeout_millis) if metrics is not None: self._receive_metrics( metrics, timeout_millis=timeout_millis, ) @final def _set_collect_callback( self, func: Callable[ [ "opentelemetry.sdk.metrics.export.MetricReader", AggregationTemporality, ], Iterable["opentelemetry.sdk.metrics.export.Metric"], ], ) -> None: """This function is internal to the SDK. It should not be called or overridden by users""" self._collect = func @abstractmethod def _receive_metrics( self, metrics_data: MetricsData, timeout_millis: float = 10_000, **kwargs, ) -> None: """Called by `MetricReader.collect` when it receives a batch of metrics""" def force_flush(self, timeout_millis: float = 10_000) -> bool: self.collect(timeout_millis=timeout_millis) return True @abstractmethod def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: """Shuts down the MetricReader. This method provides a way for the MetricReader to do any cleanup required. A metric reader can only be shutdown once, any subsequent calls are ignored and return failure status. When a `MetricReader` is registered on a :class:`~opentelemetry.sdk.metrics.MeterProvider`, :meth:`~opentelemetry.sdk.metrics.MeterProvider.shutdown` will invoke this automatically. """ class InMemoryMetricReader(MetricReader): """Implementation of `MetricReader` that returns its metrics from :func:`get_metrics_data`. This is useful for e.g. unit tests. """ def __init__( self, preferred_temporality: dict[type, AggregationTemporality] | None = None, preferred_aggregation: dict[ type, "opentelemetry.sdk.metrics.view.Aggregation" ] | None = None, ) -> None: super().__init__( preferred_temporality=preferred_temporality, preferred_aggregation=preferred_aggregation, ) self._lock = RLock() self._metrics_data: MetricsData = None def get_metrics_data( self, ) -> Optional[MetricsData]: """Reads and returns current metrics from the SDK""" with self._lock: self.collect() metrics_data = self._metrics_data self._metrics_data = None return metrics_data def _receive_metrics( self, metrics_data: MetricsData, timeout_millis: float = 10_000, **kwargs, ) -> None: with self._lock: self._metrics_data = metrics_data def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: pass class PeriodicExportingMetricReader(MetricReader): """`PeriodicExportingMetricReader` is an implementation of `MetricReader` that collects metrics based on a user-configurable time interval, and passes the metrics to the configured exporter. If the time interval is set to `math.inf`, the reader will not invoke periodic collection. The configured exporter's :py:meth:`~MetricExporter.export` method will not be called concurrently. 
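A minimal usage sketch (the console exporter is only an example; any ``MetricExporter`` works)::

        from opentelemetry.sdk.metrics import MeterProvider
        from opentelemetry.sdk.metrics.export import (
            ConsoleMetricExporter,
            PeriodicExportingMetricReader,
        )

        reader = PeriodicExportingMetricReader(
            ConsoleMetricExporter(), export_interval_millis=5_000
        )
        provider = MeterProvider(metric_readers=[reader])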
""" def __init__( self, exporter: MetricExporter, export_interval_millis: Optional[float] = None, export_timeout_millis: Optional[float] = None, ) -> None: # PeriodicExportingMetricReader defers to exporter for configuration super().__init__( preferred_temporality=exporter._preferred_temporality, preferred_aggregation=exporter._preferred_aggregation, ) # This lock is held whenever calling self._exporter.export() to prevent concurrent # execution of MetricExporter.export() # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exportbatch self._export_lock = Lock() self._exporter = exporter if export_interval_millis is None: try: export_interval_millis = float( environ.get(OTEL_METRIC_EXPORT_INTERVAL, 60000) ) except ValueError: _logger.warning( "Found invalid value for export interval, using default" ) export_interval_millis = 60000 if export_timeout_millis is None: try: export_timeout_millis = float( environ.get(OTEL_METRIC_EXPORT_TIMEOUT, 30000) ) except ValueError: _logger.warning( "Found invalid value for export timeout, using default" ) export_timeout_millis = 30000 self._export_interval_millis = export_interval_millis self._export_timeout_millis = export_timeout_millis self._shutdown = False self._shutdown_event = Event() self._shutdown_once = Once() self._daemon_thread = None if ( self._export_interval_millis > 0 and self._export_interval_millis < math.inf ): self._daemon_thread = Thread( name="OtelPeriodicExportingMetricReader", target=self._ticker, daemon=True, ) self._daemon_thread.start() if hasattr(os, "register_at_fork"): weak_at_fork = weakref.WeakMethod(self._at_fork_reinit) os.register_at_fork( after_in_child=lambda: weak_at_fork()() # pylint: disable=unnecessary-lambda ) elif self._export_interval_millis <= 0: raise ValueError( f"interval value {self._export_interval_millis} is invalid \ and needs to be larger than zero." ) def _at_fork_reinit(self): self._daemon_thread = Thread( name="OtelPeriodicExportingMetricReader", target=self._ticker, daemon=True, ) self._daemon_thread.start() def _ticker(self) -> None: interval_secs = self._export_interval_millis / 1e3 while not self._shutdown_event.wait(interval_secs): try: self.collect(timeout_millis=self._export_timeout_millis) except MetricsTimeoutError: _logger.warning( "Metric collection timed out. 
Will try again after %s seconds", interval_secs, exc_info=True, ) # one last collection below before shutting down completely try: self.collect(timeout_millis=self._export_interval_millis) except MetricsTimeoutError: _logger.warning( "Metric collection timed out.", exc_info=True, ) def _receive_metrics( self, metrics_data: MetricsData, timeout_millis: float = 10_000, **kwargs, ) -> None: token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)) # pylint: disable=broad-exception-caught,invalid-name try: with self._export_lock: self._exporter.export( metrics_data, timeout_millis=timeout_millis ) except Exception: _logger.exception("Exception while exporting metrics") detach(token) def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: deadline_ns = time_ns() + timeout_millis * 10**6 def _shutdown(): self._shutdown = True did_set = self._shutdown_once.do_once(_shutdown) if not did_set: _logger.warning("Can't shutdown multiple times") return self._shutdown_event.set() if self._daemon_thread: self._daemon_thread.join(timeout=(deadline_ns - time_ns()) / 10**9) self._exporter.shutdown(timeout=(deadline_ns - time_ns()) / 10**6) def force_flush(self, timeout_millis: float = 10_000) -> bool: super().force_flush(timeout_millis=timeout_millis) self._exporter.force_flush(timeout_millis=timeout_millis) return True python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/instrument.py000066400000000000000000000244131511654350100341470ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=too-many-ancestors, unused-import from __future__ import annotations from logging import getLogger from time import time_ns from typing import Generator, Iterable, List, Sequence, Union # This kind of import is needed to avoid Sphinx errors. 
import opentelemetry.sdk.metrics from opentelemetry.context import Context, get_current from opentelemetry.metrics import CallbackT from opentelemetry.metrics import Counter as APICounter from opentelemetry.metrics import Histogram as APIHistogram from opentelemetry.metrics import ObservableCounter as APIObservableCounter from opentelemetry.metrics import ObservableGauge as APIObservableGauge from opentelemetry.metrics import ( ObservableUpDownCounter as APIObservableUpDownCounter, ) from opentelemetry.metrics import UpDownCounter as APIUpDownCounter from opentelemetry.metrics import _Gauge as APIGauge from opentelemetry.metrics._internal.instrument import ( CallbackOptions, _MetricsHistogramAdvisory, ) from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.util.instrumentation import InstrumentationScope _logger = getLogger(__name__) _ERROR_MESSAGE = ( "Expected ASCII string of maximum length 63 characters but got {}" ) class _Synchronous: def __init__( self, name: str, instrumentation_scope: InstrumentationScope, measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer", unit: str = "", description: str = "", ): # pylint: disable=no-member result = self._check_name_unit_description(name, unit, description) if result["name"] is None: # pylint: disable=broad-exception-raised raise Exception(_ERROR_MESSAGE.format(name)) if result["unit"] is None: # pylint: disable=broad-exception-raised raise Exception(_ERROR_MESSAGE.format(unit)) name = result["name"] unit = result["unit"] description = result["description"] self.name = name.lower() self.unit = unit self.description = description self.instrumentation_scope = instrumentation_scope self._measurement_consumer = measurement_consumer super().__init__(name, unit=unit, description=description) class _Asynchronous: def __init__( self, name: str, instrumentation_scope: InstrumentationScope, measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer", callbacks: Iterable[CallbackT] | None = None, unit: str = "", description: str = "", ): # pylint: disable=no-member result = self._check_name_unit_description(name, unit, description) if result["name"] is None: # pylint: disable=broad-exception-raised raise Exception(_ERROR_MESSAGE.format(name)) if result["unit"] is None: # pylint: disable=broad-exception-raised raise Exception(_ERROR_MESSAGE.format(unit)) name = result["name"] unit = result["unit"] description = result["description"] self.name = name.lower() self.unit = unit self.description = description self.instrumentation_scope = instrumentation_scope self._measurement_consumer = measurement_consumer super().__init__(name, callbacks, unit=unit, description=description) self._callbacks: List[CallbackT] = [] if callbacks is not None: for callback in callbacks: if isinstance(callback, Generator): # advance generator to it's first yield next(callback) def inner( options: CallbackOptions, callback=callback, ) -> Iterable[Measurement]: try: return callback.send(options) except StopIteration: return [] self._callbacks.append(inner) else: self._callbacks.append(callback) def callback( self, callback_options: CallbackOptions ) -> Iterable[Measurement]: for callback in self._callbacks: try: for api_measurement in callback(callback_options): yield Measurement( api_measurement.value, time_unix_nano=time_ns(), instrument=self, context=api_measurement.context or get_current(), attributes=api_measurement.attributes, ) except Exception: # pylint: disable=broad-exception-caught _logger.exception( 
"Callback failed for instrument %s.", self.name ) class Counter(_Synchronous, APICounter): def __new__(cls, *args, **kwargs): if cls is Counter: raise TypeError("Counter must be instantiated via a meter.") return super().__new__(cls) def add( self, amount: Union[int, float], attributes: dict[str, str] | None = None, context: Context | None = None, ): if amount < 0: _logger.warning( "Add amount must be non-negative on Counter %s.", self.name ) return time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( Measurement( amount, time_unix_nano, self, context or get_current(), attributes, ) ) class UpDownCounter(_Synchronous, APIUpDownCounter): def __new__(cls, *args, **kwargs): if cls is UpDownCounter: raise TypeError("UpDownCounter must be instantiated via a meter.") return super().__new__(cls) def add( self, amount: Union[int, float], attributes: dict[str, str] | None = None, context: Context | None = None, ): time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( Measurement( amount, time_unix_nano, self, context or get_current(), attributes, ) ) class ObservableCounter(_Asynchronous, APIObservableCounter): def __new__(cls, *args, **kwargs): if cls is ObservableCounter: raise TypeError( "ObservableCounter must be instantiated via a meter." ) return super().__new__(cls) class ObservableUpDownCounter(_Asynchronous, APIObservableUpDownCounter): def __new__(cls, *args, **kwargs): if cls is ObservableUpDownCounter: raise TypeError( "ObservableUpDownCounter must be instantiated via a meter." ) return super().__new__(cls) class Histogram(_Synchronous, APIHistogram): def __init__( self, name: str, instrumentation_scope: InstrumentationScope, measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer", unit: str = "", description: str = "", explicit_bucket_boundaries_advisory: Sequence[float] | None = None, ): super().__init__( name, unit=unit, description=description, instrumentation_scope=instrumentation_scope, measurement_consumer=measurement_consumer, ) self._advisory = _MetricsHistogramAdvisory( explicit_bucket_boundaries=explicit_bucket_boundaries_advisory ) def __new__(cls, *args, **kwargs): if cls is Histogram: raise TypeError("Histogram must be instantiated via a meter.") return super().__new__(cls) def record( self, amount: Union[int, float], attributes: dict[str, str] | None = None, context: Context | None = None, ): if amount < 0: _logger.warning( "Record amount must be non-negative on Histogram %s.", self.name, ) return time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( Measurement( amount, time_unix_nano, self, context or get_current(), attributes, ) ) class Gauge(_Synchronous, APIGauge): def __new__(cls, *args, **kwargs): if cls is Gauge: raise TypeError("Gauge must be instantiated via a meter.") return super().__new__(cls) def set( self, amount: Union[int, float], attributes: dict[str, str] | None = None, context: Context | None = None, ): time_unix_nano = time_ns() self._measurement_consumer.consume_measurement( Measurement( amount, time_unix_nano, self, context or get_current(), attributes, ) ) class ObservableGauge(_Asynchronous, APIObservableGauge): def __new__(cls, *args, **kwargs): if cls is ObservableGauge: raise TypeError( "ObservableGauge must be instantiated via a meter." 
) return super().__new__(cls) # Below classes exist to prevent the direct instantiation class _Counter(Counter): pass class _UpDownCounter(UpDownCounter): pass class _ObservableCounter(ObservableCounter): pass class _ObservableUpDownCounter(ObservableUpDownCounter): pass class _Histogram(Histogram): pass class _Gauge(Gauge): pass class _ObservableGauge(ObservableGauge): pass python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/measurement.py000066400000000000000000000031771511654350100342700ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Union from opentelemetry.context import Context from opentelemetry.metrics import Instrument from opentelemetry.util.types import Attributes @dataclass(frozen=True) class Measurement: """ Represents a data point reported via the metrics API to the SDK. Attributes value: Measured value time_unix_nano: The time the API call was made to record the Measurement instrument: The instrument that produced this `Measurement`. context: The active Context of the Measurement at API call time. attributes: Measurement attributes """ # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated # one will come from napoleon extension and the other from autodoc extension. This # will raise an sphinx error of duplicated object description # See https://github.com/sphinx-doc/sphinx/issues/8664 value: Union[int, float] time_unix_nano: int instrument: Instrument context: Context attributes: Attributes = None measurement_consumer.py000066400000000000000000000120541511654350100361160ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=unused-import from abc import ABC, abstractmethod from threading import Lock from time import time_ns from typing import Iterable, List, Mapping, Optional # This kind of import is needed to avoid Sphinx errors. 
import opentelemetry.sdk.metrics import opentelemetry.sdk.metrics._internal.instrument import opentelemetry.sdk.metrics._internal.sdk_configuration from opentelemetry.metrics._internal.instrument import CallbackOptions from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.metric_reader_storage import ( MetricReaderStorage, ) from opentelemetry.sdk.metrics._internal.point import Metric class MeasurementConsumer(ABC): @abstractmethod def consume_measurement(self, measurement: Measurement) -> None: pass @abstractmethod def register_asynchronous_instrument( self, instrument: ( "opentelemetry.sdk.metrics._internal.instrument_Asynchronous" ), ): pass @abstractmethod def collect( self, metric_reader: "opentelemetry.sdk.metrics.MetricReader", timeout_millis: float = 10_000, ) -> Optional[Iterable[Metric]]: pass class SynchronousMeasurementConsumer(MeasurementConsumer): def __init__( self, sdk_config: "opentelemetry.sdk.metrics._internal.SdkConfiguration", ) -> None: self._lock = Lock() self._sdk_config = sdk_config # should never be mutated self._reader_storages: Mapping[ "opentelemetry.sdk.metrics.MetricReader", MetricReaderStorage ] = { reader: MetricReaderStorage( sdk_config, reader._instrument_class_temporality, reader._instrument_class_aggregation, ) for reader in sdk_config.metric_readers } self._async_instruments: List[ "opentelemetry.sdk.metrics._internal.instrument._Asynchronous" ] = [] def consume_measurement(self, measurement: Measurement) -> None: should_sample_exemplar = ( self._sdk_config.exemplar_filter.should_sample( measurement.value, measurement.time_unix_nano, measurement.attributes, measurement.context, ) ) for reader_storage in self._reader_storages.values(): reader_storage.consume_measurement( measurement, should_sample_exemplar ) def register_asynchronous_instrument( self, instrument: ( "opentelemetry.sdk.metrics._internal.instrument._Asynchronous" ), ) -> None: with self._lock: self._async_instruments.append(instrument) def collect( self, metric_reader: "opentelemetry.sdk.metrics.MetricReader", timeout_millis: float = 10_000, ) -> Optional[Iterable[Metric]]: with self._lock: metric_reader_storage = self._reader_storages[metric_reader] # for now, just use the defaults callback_options = CallbackOptions() deadline_ns = time_ns() + (timeout_millis * 1e6) default_timeout_ns = 10000 * 1e6 for async_instrument in self._async_instruments: remaining_time = deadline_ns - time_ns() if remaining_time < default_timeout_ns: callback_options = CallbackOptions( timeout_millis=remaining_time / 1e6 ) measurements = async_instrument.callback(callback_options) if time_ns() >= deadline_ns: raise MetricsTimeoutError( "Timed out while executing callback" ) for measurement in measurements: should_sample_exemplar = ( self._sdk_config.exemplar_filter.should_sample( measurement.value, measurement.time_unix_nano, measurement.attributes, measurement.context, ) ) metric_reader_storage.consume_measurement( measurement, should_sample_exemplar ) result = self._reader_storages[metric_reader].collect() return result metric_reader_storage.py000066400000000000000000000274221511654350100362140ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from logging import getLogger from threading import RLock from time import time_ns from typing import Dict, List, Optional from opentelemetry.metrics import ( Asynchronous, Counter, Instrument, ObservableCounter, ) from opentelemetry.sdk.metrics._internal._view_instrument_match import ( _ViewInstrumentMatch, ) from opentelemetry.sdk.metrics._internal.aggregation import ( Aggregation, ExplicitBucketHistogramAggregation, _DropAggregation, _ExplicitBucketHistogramAggregation, _ExponentialBucketHistogramAggregation, _LastValueAggregation, _SumAggregation, ) from opentelemetry.sdk.metrics._internal.export import AggregationTemporality from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.point import ( ExponentialHistogram, Gauge, Histogram, Metric, MetricsData, ResourceMetrics, ScopeMetrics, Sum, ) from opentelemetry.sdk.metrics._internal.sdk_configuration import ( SdkConfiguration, ) from opentelemetry.sdk.metrics._internal.view import View from opentelemetry.sdk.util.instrumentation import InstrumentationScope _logger = getLogger(__name__) _DEFAULT_VIEW = View(instrument_name="") class MetricReaderStorage: """The SDK's storage for a given reader""" def __init__( self, sdk_config: SdkConfiguration, instrument_class_temporality: Dict[type, AggregationTemporality], instrument_class_aggregation: Dict[type, Aggregation], ) -> None: self._lock = RLock() self._sdk_config = sdk_config self._instrument_view_instrument_matches: Dict[ Instrument, List[_ViewInstrumentMatch] ] = {} self._instrument_class_temporality = instrument_class_temporality self._instrument_class_aggregation = instrument_class_aggregation def _get_or_init_view_instrument_match( self, instrument: Instrument ) -> List[_ViewInstrumentMatch]: # Optimistically get the relevant views for the given instrument. 
Once set for a given # instrument, the mapping will never change if instrument in self._instrument_view_instrument_matches: return self._instrument_view_instrument_matches[instrument] with self._lock: # double check if it was set before we held the lock if instrument in self._instrument_view_instrument_matches: return self._instrument_view_instrument_matches[instrument] # not present, hold the lock and add a new mapping view_instrument_matches = [] self._handle_view_instrument_match( instrument, view_instrument_matches ) # if no view targeted the instrument, use the default if not view_instrument_matches: view_instrument_matches.append( _ViewInstrumentMatch( view=_DEFAULT_VIEW, instrument=instrument, instrument_class_aggregation=( self._instrument_class_aggregation ), ) ) self._instrument_view_instrument_matches[instrument] = ( view_instrument_matches ) return view_instrument_matches def consume_measurement( self, measurement: Measurement, should_sample_exemplar: bool = True ) -> None: for view_instrument_match in self._get_or_init_view_instrument_match( measurement.instrument ): view_instrument_match.consume_measurement( measurement, should_sample_exemplar ) def collect(self) -> Optional[MetricsData]: # Use a list instead of yielding to prevent a slow reader from holding # SDK locks # While holding the lock, new _ViewInstrumentMatch can't be added from # another thread (so we are sure we collect all existing views). # However, instruments can still send measurements that will make it # into the individual aggregations; collection will acquire those locks # iteratively to keep locking as fine-grained as possible. One side # effect is that end times can be slightly skewed among the metric # streams produced by the SDK, but we still align the output timestamps # for a single instrument.
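# Note that the single timestamp captured below is handed to every
# _ViewInstrumentMatch.collect() call: if two views match the same
# counter, both resulting streams share one end timestamp even though
# their aggregation locks are acquired in turn.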
collection_start_nanos = time_ns() with self._lock: instrumentation_scope_scope_metrics: Dict[ InstrumentationScope, ScopeMetrics ] = {} for ( instrument, view_instrument_matches, ) in self._instrument_view_instrument_matches.items(): aggregation_temporality = self._instrument_class_temporality[ instrument.__class__ ] metrics: List[Metric] = [] for view_instrument_match in view_instrument_matches: data_points = view_instrument_match.collect( aggregation_temporality, collection_start_nanos ) if data_points is None: continue if isinstance( # pylint: disable=protected-access view_instrument_match._aggregation, _SumAggregation, ): data = Sum( aggregation_temporality=aggregation_temporality, data_points=data_points, is_monotonic=isinstance( instrument, (Counter, ObservableCounter) ), ) elif isinstance( # pylint: disable=protected-access view_instrument_match._aggregation, _LastValueAggregation, ): data = Gauge(data_points=data_points) elif isinstance( # pylint: disable=protected-access view_instrument_match._aggregation, _ExplicitBucketHistogramAggregation, ): data = Histogram( data_points=data_points, aggregation_temporality=aggregation_temporality, ) elif isinstance( # pylint: disable=protected-access view_instrument_match._aggregation, _DropAggregation, ): continue elif isinstance( # pylint: disable=protected-access view_instrument_match._aggregation, _ExponentialBucketHistogramAggregation, ): data = ExponentialHistogram( data_points=data_points, aggregation_temporality=aggregation_temporality, ) metrics.append( Metric( # pylint: disable=protected-access # pylint: disable=possibly-used-before-assignment name=view_instrument_match._name, description=view_instrument_match._description, unit=view_instrument_match._instrument.unit, data=data, ) ) if metrics: if instrument.instrumentation_scope not in ( instrumentation_scope_scope_metrics ): instrumentation_scope_scope_metrics[ instrument.instrumentation_scope ] = ScopeMetrics( scope=instrument.instrumentation_scope, metrics=metrics, schema_url=instrument.instrumentation_scope.schema_url, ) else: instrumentation_scope_scope_metrics[ instrument.instrumentation_scope ].metrics.extend(metrics) if instrumentation_scope_scope_metrics: return MetricsData( resource_metrics=[ ResourceMetrics( resource=self._sdk_config.resource, scope_metrics=list( instrumentation_scope_scope_metrics.values() ), schema_url=self._sdk_config.resource.schema_url, ) ] ) return None def _handle_view_instrument_match( self, instrument: Instrument, view_instrument_matches: List["_ViewInstrumentMatch"], ) -> None: for view in self._sdk_config.views: # pylint: disable=protected-access if not view._match(instrument): continue if not self._check_view_instrument_compatibility(view, instrument): continue new_view_instrument_match = _ViewInstrumentMatch( view=view, instrument=instrument, instrument_class_aggregation=( self._instrument_class_aggregation ), ) for ( existing_view_instrument_matches ) in self._instrument_view_instrument_matches.values(): for ( existing_view_instrument_match ) in existing_view_instrument_matches: if existing_view_instrument_match.conflicts( new_view_instrument_match ): _logger.warning( "Views %s and %s will cause conflicting " "metrics identities", existing_view_instrument_match._view, new_view_instrument_match._view, ) view_instrument_matches.append(new_view_instrument_match) @staticmethod def _check_view_instrument_compatibility( view: View, instrument: Instrument ) -> bool: """ Checks if a view and an instrument are compatible. 
Returns `true` if they are compatible and a `_ViewInstrumentMatch` object should be created, `false` otherwise. """ result = True # pylint: disable=protected-access if isinstance(instrument, Asynchronous) and isinstance( view._aggregation, ExplicitBucketHistogramAggregation ): _logger.warning( "View %s and instrument %s will produce " "semantic errors when matched, the view " "has not been applied.", view, instrument, ) result = False return result python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py000066400000000000000000000176441511654350100331000ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=unused-import from dataclasses import asdict, dataclass, field from json import dumps, loads from typing import Optional, Sequence, Union # This kind of import is needed to avoid Sphinx errors. import opentelemetry.sdk.metrics._internal from opentelemetry.sdk.metrics._internal.exemplar import Exemplar from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.util.types import Attributes @dataclass(frozen=True) class NumberDataPoint: """Single data point in a timeseries that describes the time-varying scalar value of a metric. """ attributes: Attributes start_time_unix_nano: int time_unix_nano: int value: Union[int, float] exemplars: Sequence[Exemplar] = field(default_factory=list) def to_json(self, indent: Optional[int] = 4) -> str: return dumps(asdict(self), indent=indent) @dataclass(frozen=True) class HistogramDataPoint: """Single data point in a timeseries that describes the time-varying scalar value of a metric. """ attributes: Attributes start_time_unix_nano: int time_unix_nano: int count: int sum: Union[int, float] bucket_counts: Sequence[int] explicit_bounds: Sequence[float] min: float max: float exemplars: Sequence[Exemplar] = field(default_factory=list) def to_json(self, indent: Optional[int] = 4) -> str: return dumps(asdict(self), indent=indent) @dataclass(frozen=True) class Buckets: offset: int bucket_counts: Sequence[int] @dataclass(frozen=True) class ExponentialHistogramDataPoint: """Single data point in a timeseries whose boundaries are defined by an exponential function. This timeseries describes the time-varying scalar value of a metric. """ attributes: Attributes start_time_unix_nano: int time_unix_nano: int count: int sum: Union[int, float] scale: int zero_count: int positive: Buckets negative: Buckets flags: int min: float max: float exemplars: Sequence[Exemplar] = field(default_factory=list) def to_json(self, indent: Optional[int] = 4) -> str: return dumps(asdict(self), indent=indent) @dataclass(frozen=True) class ExponentialHistogram: """Represents the type of a metric that is calculated by aggregating as an ExponentialHistogram of all reported measurements over a time interval. 
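Bucket boundaries are a function of ``scale``: with ``base = 2**(2**-scale)``, positive bucket ``i`` covers the interval ``(base**i, base**(i+1)]``, so each increment of ``scale`` doubles the number of buckets per power of two.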
""" data_points: Sequence[ExponentialHistogramDataPoint] aggregation_temporality: ( "opentelemetry.sdk.metrics.export.AggregationTemporality" ) def to_json(self, indent: Optional[int] = 4) -> str: return dumps( { "data_points": [ loads(data_point.to_json(indent=indent)) for data_point in self.data_points ], "aggregation_temporality": self.aggregation_temporality, }, indent=indent, ) @dataclass(frozen=True) class Sum: """Represents the type of a scalar metric that is calculated as a sum of all reported measurements over a time interval.""" data_points: Sequence[NumberDataPoint] aggregation_temporality: ( "opentelemetry.sdk.metrics.export.AggregationTemporality" ) is_monotonic: bool def to_json(self, indent: Optional[int] = 4) -> str: return dumps( { "data_points": [ loads(data_point.to_json(indent=indent)) for data_point in self.data_points ], "aggregation_temporality": self.aggregation_temporality, "is_monotonic": self.is_monotonic, }, indent=indent, ) @dataclass(frozen=True) class Gauge: """Represents the type of a scalar metric that always exports the current value for every data point. It should be used for an unknown aggregation.""" data_points: Sequence[NumberDataPoint] def to_json(self, indent: Optional[int] = 4) -> str: return dumps( { "data_points": [ loads(data_point.to_json(indent=indent)) for data_point in self.data_points ], }, indent=indent, ) @dataclass(frozen=True) class Histogram: """Represents the type of a metric that is calculated by aggregating as a histogram of all reported measurements over a time interval.""" data_points: Sequence[HistogramDataPoint] aggregation_temporality: ( "opentelemetry.sdk.metrics.export.AggregationTemporality" ) def to_json(self, indent: Optional[int] = 4) -> str: return dumps( { "data_points": [ loads(data_point.to_json(indent=indent)) for data_point in self.data_points ], "aggregation_temporality": self.aggregation_temporality, }, indent=indent, ) # pylint: disable=invalid-name DataT = Union[Sum, Gauge, Histogram, ExponentialHistogram] DataPointT = Union[ NumberDataPoint, HistogramDataPoint, ExponentialHistogramDataPoint ] @dataclass(frozen=True) class Metric: """Represents a metric point in the OpenTelemetry data model to be exported.""" name: str description: Optional[str] unit: Optional[str] data: DataT def to_json(self, indent: Optional[int] = 4) -> str: return dumps( { "name": self.name, "description": self.description or "", "unit": self.unit or "", "data": loads(self.data.to_json(indent=indent)), }, indent=indent, ) @dataclass(frozen=True) class ScopeMetrics: """A collection of Metrics produced by a scope""" scope: InstrumentationScope metrics: Sequence[Metric] schema_url: str def to_json(self, indent: Optional[int] = 4) -> str: return dumps( { "scope": loads(self.scope.to_json(indent=indent)), "metrics": [ loads(metric.to_json(indent=indent)) for metric in self.metrics ], "schema_url": self.schema_url, }, indent=indent, ) @dataclass(frozen=True) class ResourceMetrics: """A collection of ScopeMetrics from a Resource""" resource: Resource scope_metrics: Sequence[ScopeMetrics] schema_url: str def to_json(self, indent: Optional[int] = 4) -> str: return dumps( { "resource": loads(self.resource.to_json(indent=indent)), "scope_metrics": [ loads(scope_metrics.to_json(indent=indent)) for scope_metrics in self.scope_metrics ], "schema_url": self.schema_url, }, indent=indent, ) @dataclass(frozen=True) class MetricsData: """An array of ResourceMetrics""" resource_metrics: Sequence[ResourceMetrics] def to_json(self, indent: Optional[int] = 4) -> 
str: return dumps( { "resource_metrics": [ loads(resource_metrics.to_json(indent=indent)) for resource_metrics in self.resource_metrics ] }, indent=indent, ) sdk_configuration.py000066400000000000000000000020741511654350100353670ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=unused-import from dataclasses import dataclass from typing import Sequence # This kind of import is needed to avoid Sphinx errors. import opentelemetry.sdk.metrics import opentelemetry.sdk.resources @dataclass class SdkConfiguration: exemplar_filter: "opentelemetry.sdk.metrics.ExemplarFilter" resource: "opentelemetry.sdk.resources.Resource" metric_readers: Sequence["opentelemetry.sdk.metrics.MetricReader"] views: Sequence["opentelemetry.sdk.metrics.View"] python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py000066400000000000000000000165461511654350100327150ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from fnmatch import fnmatch from logging import getLogger from typing import Callable, Optional, Set, Type from opentelemetry.metrics import Instrument from opentelemetry.sdk.metrics._internal.aggregation import ( Aggregation, DefaultAggregation, _Aggregation, _ExplicitBucketHistogramAggregation, _ExponentialBucketHistogramAggregation, ) from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir, ) _logger = getLogger(__name__) def _default_reservoir_factory( aggregation_type: Type[_Aggregation], ) -> ExemplarReservoirBuilder: """Default reservoir factory per aggregation.""" if issubclass(aggregation_type, _ExplicitBucketHistogramAggregation): return AlignedHistogramBucketExemplarReservoir if issubclass(aggregation_type, _ExponentialBucketHistogramAggregation): return SimpleFixedSizeExemplarReservoir return SimpleFixedSizeExemplarReservoir class View: """ The configuration parameters of a `View` can be used for the following purposes: 1. Match instruments: When an instrument matches a view, measurements received by that instrument will be processed. 2. Customize metric streams: A metric stream is identified by a match between a view and an instrument and a set of attributes. The metric stream can be customized by certain attributes of the corresponding view.
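For example, a view along these lines (a minimal sketch; the instrument name and attribute key are illustrative) matches one instrument by name, renames its metric stream, and keeps a single attribute:

.. code-block:: python

    View(
        instrument_name="http.server.duration",
        name="http.server.duration.by_route",
        attribute_keys={"http.route"},
    )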
The attributes documented next serve one of the previous two purposes. Args: instrument_type: This is an instrument matching attribute: the class the instrument must be to match the view. instrument_name: This is an instrument matching attribute: the name the instrument must have to match the view. Wildcard characters are supported. Wildcard characters should not be used with this attribute if the view also has a ``name`` defined. meter_name: This is an instrument matching attribute: the name the instrument meter must have to match the view. meter_version: This is an instrument matching attribute: the version the instrument meter must have to match the view. meter_schema_url: This is an instrument matching attribute: the schema URL the instrument meter must have to match the view. name: This is a metric stream customizing attribute: the name of the metric stream. If `None`, the name of the instrument will be used. description: This is a metric stream customizing attribute: the description of the metric stream. If `None`, the description of the instrument will be used. attribute_keys: This is a metric stream customizing attribute: this is a set of attribute keys. If not `None`, then only the measurement attributes that are in ``attribute_keys`` will be used to identify the metric stream. aggregation: This is a metric stream customizing attribute: the aggregation instance to use when data is aggregated for the corresponding metric stream. If `None`, an instance of `DefaultAggregation` will be used. exemplar_reservoir_factory: This is a metric stream customizing attribute: the exemplar reservoir factory. instrument_unit: This is an instrument matching attribute: the unit the instrument must have to match the view. This class is not intended to be subclassed by the user. """ _default_aggregation = DefaultAggregation() def __init__( self, instrument_type: Optional[Type[Instrument]] = None, instrument_name: Optional[str] = None, meter_name: Optional[str] = None, meter_version: Optional[str] = None, meter_schema_url: Optional[str] = None, name: Optional[str] = None, description: Optional[str] = None, attribute_keys: Optional[Set[str]] = None, aggregation: Optional[Aggregation] = None, exemplar_reservoir_factory: Optional[ Callable[[Type[_Aggregation]], ExemplarReservoirBuilder] ] = None, instrument_unit: Optional[str] = None, ): if ( instrument_type is instrument_name is instrument_unit is meter_name is meter_version is meter_schema_url is None ): # pylint: disable=broad-exception-raised raise Exception( "Some instrument selection " f"criteria must be provided for View {name}" ) if ( name is not None and instrument_name is not None and ("*" in instrument_name or "?" in instrument_name) ): # pylint: disable=broad-exception-raised raise Exception( f"View {name} declared with wildcard " "characters in instrument_name" ) # _name, _description, _aggregation, _exemplar_reservoir_factory and # _attribute_keys will be accessed when instantiating a _ViewInstrumentMatch.
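# For instance, View(name="latency", instrument_name="http.*") is
# rejected by the wildcard check above: one explicit stream name cannot
# be applied to every instrument the pattern may match without the
# resulting metric identities colliding.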
self._name = name self._instrument_type = instrument_type self._instrument_name = instrument_name self._instrument_unit = instrument_unit self._meter_name = meter_name self._meter_version = meter_version self._meter_schema_url = meter_schema_url self._description = description self._attribute_keys = attribute_keys self._aggregation = aggregation or self._default_aggregation self._exemplar_reservoir_factory = ( exemplar_reservoir_factory or _default_reservoir_factory ) # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches def _match(self, instrument: Instrument) -> bool: if self._instrument_type is not None: if not isinstance(instrument, self._instrument_type): return False if self._instrument_name is not None: if not fnmatch(instrument.name, self._instrument_name): return False if self._instrument_unit is not None: if not fnmatch(instrument.unit, self._instrument_unit): return False if self._meter_name is not None: if instrument.instrumentation_scope.name != self._meter_name: return False if self._meter_version is not None: if instrument.instrumentation_scope.version != self._meter_version: return False if self._meter_schema_url is not None: if ( instrument.instrumentation_scope.schema_url != self._meter_schema_url ): return False return True python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/000077500000000000000000000000001511654350100307275ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py000066400000000000000000000033531511654350100330440ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.sdk.metrics._internal.aggregation import ( AggregationTemporality, ) from opentelemetry.sdk.metrics._internal.export import ( ConsoleMetricExporter, InMemoryMetricReader, MetricExporter, MetricExportResult, MetricReader, PeriodicExportingMetricReader, ) # The point module is not in the export directory to avoid a circular import. 
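# Illustrative wiring of these re-exports (a sketch, not part of this
# module):
#     from opentelemetry.sdk.metrics.export import (
#         ConsoleMetricExporter,
#         PeriodicExportingMetricReader,
#     )
#     reader = PeriodicExportingMetricReader(ConsoleMetricExporter())
# The reader is then passed to MeterProvider(metric_readers=[reader]).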
from opentelemetry.sdk.metrics._internal.point import ( # noqa: F401 Buckets, DataPointT, DataT, ExponentialHistogram, ExponentialHistogramDataPoint, Gauge, Histogram, HistogramDataPoint, Metric, MetricsData, NumberDataPoint, ResourceMetrics, ScopeMetrics, Sum, ) __all__ = [ "AggregationTemporality", "Buckets", "ConsoleMetricExporter", "InMemoryMetricReader", "MetricExporter", "MetricExportResult", "MetricReader", "PeriodicExportingMetricReader", "DataPointT", "DataT", "ExponentialHistogram", "ExponentialHistogramDataPoint", "Gauge", "Histogram", "HistogramDataPoint", "Metric", "MetricsData", "NumberDataPoint", "ResourceMetrics", "ScopeMetrics", "Sum", ] python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/view/000077500000000000000000000000001511654350100303605ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/metrics/view/__init__.py000066400000000000000000000021521511654350100324710ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.sdk.metrics._internal.aggregation import ( Aggregation, DefaultAggregation, DropAggregation, ExplicitBucketHistogramAggregation, ExponentialBucketHistogramAggregation, LastValueAggregation, SumAggregation, ) from opentelemetry.sdk.metrics._internal.view import View __all__ = [ "Aggregation", "DefaultAggregation", "DropAggregation", "ExplicitBucketHistogramAggregation", "ExponentialBucketHistogramAggregation", "LastValueAggregation", "SumAggregation", "View", ] python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/py.typed000066400000000000000000000000001511654350100274250ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/resources/000077500000000000000000000000001511654350100277525ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py000066400000000000000000000466701511654350100321000ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This package implements `OpenTelemetry Resources `_: *A Resource is an immutable representation of the entity producing telemetry. For example, a process producing telemetry that is running in a container on Kubernetes has a Pod name, it is in a namespace and possibly is part of a Deployment which also has a name. 
All three of these attributes can be included in the Resource.* Resource objects are created with `Resource.create`, which accepts attributes (key-values). Resources should NOT be created via constructor except by `ResourceDetector` instances which can't use `Resource.create` to avoid infinite loops. Working with `Resource` objects should only be done via the Resource API methods. Resource attributes can also be passed at process invocation in the :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should register your resource with the `opentelemetry.sdk.trace.TracerProvider` by passing them into their constructors. The `Resource` passed to a provider is available to the exporter, which can send on this information as it sees fit. .. code-block:: python trace.set_tracer_provider( TracerProvider( resource=Resource.create({ "service.name": "shoppingcart", "service.instance.id": "instance-12", }), ), ) print(trace.get_tracer_provider().resource.attributes) {'telemetry.sdk.language': 'python', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '0.13.dev0', 'service.name': 'shoppingcart', 'service.instance.id': 'instance-12'} Note that the OpenTelemetry project documents certain `"standard attributes" `_ that have prescribed semantic meanings, for example ``service.name`` in the above example. """ # ResourceAttributes is deprecated # pyright: reportDeprecated=false import abc import concurrent.futures import logging import os import platform import socket import sys import typing from json import dumps from os import environ from types import ModuleType from typing import List, Optional, cast from urllib import parse from opentelemetry.attributes import BoundedAttributes from opentelemetry.sdk.environment_variables import ( OTEL_EXPERIMENTAL_RESOURCE_DETECTORS, OTEL_RESOURCE_ATTRIBUTES, OTEL_SERVICE_NAME, ) from opentelemetry.semconv.resource import ResourceAttributes from opentelemetry.util._importlib_metadata import ( entry_points, # type: ignore[reportUnknownVariableType] version, ) from opentelemetry.util.types import AttributeValue psutil: Optional[ModuleType] = None try: import psutil as psutil_module psutil = psutil_module except ImportError: pass LabelValue = AttributeValue Attributes = typing.Mapping[str, LabelValue] logger = logging.getLogger(__name__) CLOUD_PROVIDER = ResourceAttributes.CLOUD_PROVIDER CLOUD_ACCOUNT_ID = ResourceAttributes.CLOUD_ACCOUNT_ID CLOUD_REGION = ResourceAttributes.CLOUD_REGION CLOUD_AVAILABILITY_ZONE = ResourceAttributes.CLOUD_AVAILABILITY_ZONE CONTAINER_NAME = ResourceAttributes.CONTAINER_NAME CONTAINER_ID = ResourceAttributes.CONTAINER_ID CONTAINER_IMAGE_NAME = ResourceAttributes.CONTAINER_IMAGE_NAME CONTAINER_IMAGE_TAG = ResourceAttributes.CONTAINER_IMAGE_TAG DEPLOYMENT_ENVIRONMENT = ResourceAttributes.DEPLOYMENT_ENVIRONMENT FAAS_NAME = ResourceAttributes.FAAS_NAME FAAS_ID = ResourceAttributes.FAAS_ID FAAS_VERSION = ResourceAttributes.FAAS_VERSION FAAS_INSTANCE = ResourceAttributes.FAAS_INSTANCE HOST_NAME = ResourceAttributes.HOST_NAME HOST_ARCH = ResourceAttributes.HOST_ARCH HOST_TYPE = ResourceAttributes.HOST_TYPE HOST_IMAGE_NAME = ResourceAttributes.HOST_IMAGE_NAME HOST_IMAGE_ID = ResourceAttributes.HOST_IMAGE_ID HOST_IMAGE_VERSION = ResourceAttributes.HOST_IMAGE_VERSION KUBERNETES_CLUSTER_NAME = ResourceAttributes.K8S_CLUSTER_NAME KUBERNETES_NAMESPACE_NAME = ResourceAttributes.K8S_NAMESPACE_NAME KUBERNETES_POD_UID = ResourceAttributes.K8S_POD_UID KUBERNETES_POD_NAME = ResourceAttributes.K8S_POD_NAME KUBERNETES_CONTAINER_NAME = 
ResourceAttributes.K8S_CONTAINER_NAME KUBERNETES_REPLICA_SET_UID = ResourceAttributes.K8S_REPLICASET_UID KUBERNETES_REPLICA_SET_NAME = ResourceAttributes.K8S_REPLICASET_NAME KUBERNETES_DEPLOYMENT_UID = ResourceAttributes.K8S_DEPLOYMENT_UID KUBERNETES_DEPLOYMENT_NAME = ResourceAttributes.K8S_DEPLOYMENT_NAME KUBERNETES_STATEFUL_SET_UID = ResourceAttributes.K8S_STATEFULSET_UID KUBERNETES_STATEFUL_SET_NAME = ResourceAttributes.K8S_STATEFULSET_NAME KUBERNETES_DAEMON_SET_UID = ResourceAttributes.K8S_DAEMONSET_UID KUBERNETES_DAEMON_SET_NAME = ResourceAttributes.K8S_DAEMONSET_NAME KUBERNETES_JOB_UID = ResourceAttributes.K8S_JOB_UID KUBERNETES_JOB_NAME = ResourceAttributes.K8S_JOB_NAME KUBERNETES_CRON_JOB_UID = ResourceAttributes.K8S_CRONJOB_UID KUBERNETES_CRON_JOB_NAME = ResourceAttributes.K8S_CRONJOB_NAME OS_DESCRIPTION = ResourceAttributes.OS_DESCRIPTION OS_TYPE = ResourceAttributes.OS_TYPE OS_VERSION = ResourceAttributes.OS_VERSION PROCESS_PID = ResourceAttributes.PROCESS_PID PROCESS_PARENT_PID = ResourceAttributes.PROCESS_PARENT_PID PROCESS_EXECUTABLE_NAME = ResourceAttributes.PROCESS_EXECUTABLE_NAME PROCESS_EXECUTABLE_PATH = ResourceAttributes.PROCESS_EXECUTABLE_PATH PROCESS_COMMAND = ResourceAttributes.PROCESS_COMMAND PROCESS_COMMAND_LINE = ResourceAttributes.PROCESS_COMMAND_LINE PROCESS_COMMAND_ARGS = ResourceAttributes.PROCESS_COMMAND_ARGS PROCESS_OWNER = ResourceAttributes.PROCESS_OWNER PROCESS_RUNTIME_NAME = ResourceAttributes.PROCESS_RUNTIME_NAME PROCESS_RUNTIME_VERSION = ResourceAttributes.PROCESS_RUNTIME_VERSION PROCESS_RUNTIME_DESCRIPTION = ResourceAttributes.PROCESS_RUNTIME_DESCRIPTION SERVICE_NAME = ResourceAttributes.SERVICE_NAME SERVICE_NAMESPACE = ResourceAttributes.SERVICE_NAMESPACE SERVICE_INSTANCE_ID = ResourceAttributes.SERVICE_INSTANCE_ID SERVICE_VERSION = ResourceAttributes.SERVICE_VERSION TELEMETRY_SDK_NAME = ResourceAttributes.TELEMETRY_SDK_NAME TELEMETRY_SDK_VERSION = ResourceAttributes.TELEMETRY_SDK_VERSION TELEMETRY_AUTO_VERSION = ResourceAttributes.TELEMETRY_AUTO_VERSION TELEMETRY_SDK_LANGUAGE = ResourceAttributes.TELEMETRY_SDK_LANGUAGE _OPENTELEMETRY_SDK_VERSION: str = version("opentelemetry-sdk") class Resource: """A Resource is an immutable representation of the entity producing telemetry as Attributes.""" _attributes: BoundedAttributes _schema_url: str def __init__( self, attributes: Attributes, schema_url: typing.Optional[str] = None ): self._attributes = BoundedAttributes(attributes=attributes) if schema_url is None: schema_url = "" self._schema_url = schema_url @staticmethod def create( attributes: typing.Optional[Attributes] = None, schema_url: typing.Optional[str] = None, ) -> "Resource": """Creates a new `Resource` from attributes. `ResourceDetector` instances should not call this method. Args: attributes: Optional zero or more key-value pairs. schema_url: Optional URL pointing to the schema Returns: The newly-created Resource. 
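For example (a minimal sketch; the service name is illustrative):

.. code-block:: python

    resource = Resource.create({"service.name": "shoppingcart"})
    # Detector-provided attributes such as telemetry.sdk.* are merged
    # in first, so the attributes passed here take precedence.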
""" if not attributes: attributes = {} otel_experimental_resource_detectors = {"otel"}.union( { otel_experimental_resource_detector.strip() for otel_experimental_resource_detector in environ.get( OTEL_EXPERIMENTAL_RESOURCE_DETECTORS, "" ).split(",") if otel_experimental_resource_detector } ) resource_detectors: List[ResourceDetector] = [] resource_detector: str for resource_detector in otel_experimental_resource_detectors: try: resource_detectors.append( next( iter( entry_points( group="opentelemetry_resource_detector", name=resource_detector.strip(), ) # type: ignore[reportUnknownArgumentType] ) ).load()() ) except Exception: # pylint: disable=broad-exception-caught logger.exception( "Failed to load resource detector '%s', skipping", resource_detector, ) continue resource = get_aggregated_resources( resource_detectors, _DEFAULT_RESOURCE ).merge(Resource(attributes, schema_url)) if not resource.attributes.get(SERVICE_NAME, None): default_service_name = "unknown_service" process_executable_name = cast( Optional[str], resource.attributes.get(PROCESS_EXECUTABLE_NAME, None), ) if process_executable_name: default_service_name += ":" + process_executable_name resource = resource.merge( Resource({SERVICE_NAME: default_service_name}, schema_url) ) return resource @staticmethod def get_empty() -> "Resource": return _EMPTY_RESOURCE @property def attributes(self) -> Attributes: return self._attributes @property def schema_url(self) -> str: return self._schema_url def merge(self, other: "Resource") -> "Resource": """Merges this resource and an updating resource into a new `Resource`. If a key exists on both the old and updating resource, the value of the updating resource will override the old resource value. The updating resource's `schema_url` will be used only if the old `schema_url` is empty. Attempting to merge two resources with different, non-empty values for `schema_url` will result in an error and return the old resource. Args: other: The other resource to be merged. Returns: The newly-created Resource. 
""" merged_attributes = dict(self.attributes).copy() merged_attributes.update(other.attributes) if self.schema_url == "": schema_url = other.schema_url elif other.schema_url == "": schema_url = self.schema_url elif self.schema_url == other.schema_url: schema_url = other.schema_url else: logger.error( "Failed to merge resources: The two schemas %s and %s are incompatible", self.schema_url, other.schema_url, ) return self return Resource(merged_attributes, schema_url) def __eq__(self, other: object) -> bool: if not isinstance(other, Resource): return False return ( self._attributes == other._attributes and self._schema_url == other._schema_url ) def __hash__(self) -> int: return hash( f"{dumps(self._attributes.copy(), sort_keys=True)}|{self._schema_url}" ) def to_json(self, indent: Optional[int] = 4) -> str: return dumps( { "attributes": dict(self.attributes), "schema_url": self._schema_url, }, indent=indent, ) _EMPTY_RESOURCE = Resource({}) _DEFAULT_RESOURCE = Resource( { TELEMETRY_SDK_LANGUAGE: "python", TELEMETRY_SDK_NAME: "opentelemetry", TELEMETRY_SDK_VERSION: _OPENTELEMETRY_SDK_VERSION, } ) class ResourceDetector(abc.ABC): def __init__(self, raise_on_error: bool = False) -> None: self.raise_on_error = raise_on_error @abc.abstractmethod def detect(self) -> "Resource": """Don't call `Resource.create` here to avoid an infinite loop, instead instantiate `Resource` directly""" raise NotImplementedError() class OTELResourceDetector(ResourceDetector): # pylint: disable=no-self-use def detect(self) -> "Resource": env_resources_items = environ.get(OTEL_RESOURCE_ATTRIBUTES) env_resource_map: dict[str, AttributeValue] = {} if env_resources_items: for item in env_resources_items.split(","): try: key, value = item.split("=", maxsplit=1) except ValueError as exc: logger.warning( "Invalid key value resource attribute pair %s: %s", item, exc, ) continue value_url_decoded = parse.unquote(value.strip()) env_resource_map[key.strip()] = value_url_decoded service_name = environ.get(OTEL_SERVICE_NAME) if service_name: env_resource_map[SERVICE_NAME] = service_name return Resource(env_resource_map) class ProcessResourceDetector(ResourceDetector): # pylint: disable=no-self-use def detect(self) -> "Resource": _runtime_version = ".".join( map( str, ( sys.version_info[:3] if sys.version_info.releaselevel == "final" and not sys.version_info.serial else sys.version_info ), ) ) _process_pid = os.getpid() _process_executable_name = sys.executable _process_executable_path = os.path.dirname(_process_executable_name) _process_command = sys.argv[0] _process_command_line = " ".join(sys.argv) _process_command_args = sys.argv resource_info = { PROCESS_RUNTIME_DESCRIPTION: sys.version, PROCESS_RUNTIME_NAME: sys.implementation.name, PROCESS_RUNTIME_VERSION: _runtime_version, PROCESS_PID: _process_pid, PROCESS_EXECUTABLE_NAME: _process_executable_name, PROCESS_EXECUTABLE_PATH: _process_executable_path, PROCESS_COMMAND: _process_command, PROCESS_COMMAND_LINE: _process_command_line, PROCESS_COMMAND_ARGS: _process_command_args, } if hasattr(os, "getppid"): # pypy3 does not have getppid() resource_info[PROCESS_PARENT_PID] = os.getppid() if psutil is not None: process = psutil.Process() username = process.username() resource_info[PROCESS_OWNER] = username return Resource(resource_info) # type: ignore class OsResourceDetector(ResourceDetector): """Detect os resources based on `Operating System conventions `_.""" def detect(self) -> "Resource": """Returns a resource with with ``os.type`` and ``os.version``. 
Python's platform library ~~~~~~~~~~~~~~~~~~~~~~~~~ To grab this information, Python's ``platform`` does not return what a user might expect it to. Below is a breakdown of its return values in different operating systems. .. code-block:: python :caption: Linux >>> platform.system() 'Linux' >>> platform.release() '6.5.0-35-generic' >>> platform.version() '#35~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue May 7 09:00:52 UTC 2' .. code-block:: python :caption: MacOS >>> platform.system() 'Darwin' >>> platform.release() '23.0.0' >>> platform.version() 'Darwin Kernel Version 23.0.0: Fri Sep 15 14:42:57 PDT 2023; root:xnu-10002.1.13~1/RELEASE_ARM64_T8112' .. code-block:: python :caption: Windows >>> platform.system() 'Windows' >>> platform.release() '2022Server' >>> platform.version() '10.0.20348' .. code-block:: python :caption: FreeBSD >>> platform.system() 'FreeBSD' >>> platform.release() '14.1-RELEASE' >>> platform.version() 'FreeBSD 14.1-RELEASE releng/14.1-n267679-10e31f0946d8 GENERIC' .. code-block:: python :caption: Solaris >>> platform.system() 'SunOS' >>> platform.release() '5.11' >>> platform.version() '11.4.0.15.0' """ os_type = platform.system().lower() os_version = platform.release() # See docstring if os_type == "windows": os_version = platform.version() # Align SunOS with conventions elif os_type == "sunos": os_type = "solaris" os_version = platform.version() return Resource( { OS_TYPE: os_type, OS_VERSION: os_version, } ) class _HostResourceDetector(ResourceDetector): # type: ignore[reportUnusedClass] """ The HostResourceDetector detects the hostname and architecture attributes. """ def detect(self) -> "Resource": return Resource( { HOST_NAME: socket.gethostname(), HOST_ARCH: platform.machine(), } ) def get_aggregated_resources( detectors: typing.List["ResourceDetector"], initial_resource: typing.Optional[Resource] = None, timeout: int = 5, ) -> "Resource": """Retrieves resources from detectors in the order that they were passed :param detectors: List of resources in order of priority :param initial_resource: Static resource. This has highest priority :param timeout: Number of seconds to wait for each detector to return :return: """ detectors_merged_resource = initial_resource or Resource.create() with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: futures = [executor.submit(detector.detect) for detector in detectors] for detector_ind, future in enumerate(futures): detector = detectors[detector_ind] detected_resource: Resource = _EMPTY_RESOURCE try: detected_resource = future.result(timeout=timeout) except concurrent.futures.TimeoutError as ex: if detector.raise_on_error: raise ex logger.warning( "Detector %s took longer than %s seconds, skipping", detector, timeout, ) # pylint: disable=broad-exception-caught except Exception as ex: if detector.raise_on_error: raise ex logger.warning( "Exception %s in detector %s, ignoring", ex, detector ) finally: detectors_merged_resource = detectors_merged_resource.merge( detected_resource ) return detectors_merged_resource python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/000077500000000000000000000000001511654350100270365ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py000066400000000000000000001302041511654350100311470ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=too-many-lines import abc import atexit import concurrent.futures import json import logging import threading import traceback import typing from os import environ from time import time_ns from types import MappingProxyType, TracebackType from typing import ( Any, Callable, Dict, Iterator, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Type, Union, ) from warnings import filterwarnings from typing_extensions import deprecated from opentelemetry import context as context_api from opentelemetry import trace as trace_api from opentelemetry.attributes import BoundedAttributes from opentelemetry.sdk import util from opentelemetry.sdk.environment_variables import ( OTEL_ATTRIBUTE_COUNT_LIMIT, OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT, OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, OTEL_SDK_DISABLED, OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT, OTEL_SPAN_EVENT_COUNT_LIMIT, OTEL_SPAN_LINK_COUNT_LIMIT, ) from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import sampling from opentelemetry.sdk.trace.id_generator import IdGenerator, RandomIdGenerator from opentelemetry.sdk.util import BoundedList from opentelemetry.sdk.util.instrumentation import ( InstrumentationInfo, InstrumentationScope, ) from opentelemetry.semconv.attributes.exception_attributes import ( EXCEPTION_ESCAPED, EXCEPTION_MESSAGE, EXCEPTION_STACKTRACE, EXCEPTION_TYPE, ) from opentelemetry.trace import NoOpTracer, SpanContext from opentelemetry.trace.status import Status, StatusCode from opentelemetry.util import types from opentelemetry.util._decorator import _agnosticcontextmanager logger = logging.getLogger(__name__) _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT = 128 _DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT = 128 _DEFAULT_OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT = 128 _DEFAULT_OTEL_LINK_ATTRIBUTE_COUNT_LIMIT = 128 _DEFAULT_OTEL_SPAN_EVENT_COUNT_LIMIT = 128 _DEFAULT_OTEL_SPAN_LINK_COUNT_LIMIT = 128 _ENV_VALUE_UNSET = "" class SpanProcessor: """Interface which allows hooks for SDK's `Span` start and end method invocations. Span processors can be registered directly using :func:`TracerProvider.add_span_processor` and they are invoked in the same order as they were registered. """ def on_start( self, span: "Span", parent_context: Optional[context_api.Context] = None, ) -> None: """Called when a :class:`opentelemetry.trace.Span` is started. This method is called synchronously on the thread that starts the span, therefore it should not block or throw an exception. Args: span: The :class:`opentelemetry.trace.Span` that just started. parent_context: The parent context of the span that just started. """ def on_end(self, span: "ReadableSpan") -> None: """Called when a :class:`opentelemetry.trace.Span` is ended. This method is called synchronously on the thread that ends the span, therefore it should not block or throw an exception. Args: span: The :class:`opentelemetry.trace.Span` that just ended. 
""" def shutdown(self) -> None: """Called when a :class:`opentelemetry.sdk.trace.TracerProvider` is shutdown.""" def force_flush(self, timeout_millis: int = 30000) -> bool: """Export all ended spans to the configured Exporter that have not yet been exported. Args: timeout_millis: The maximum amount of time to wait for spans to be exported. Returns: False if the timeout is exceeded, True otherwise. """ # Temporary fix until https://github.com/PyCQA/pylint/issues/4098 is resolved # pylint:disable=no-member class SynchronousMultiSpanProcessor(SpanProcessor): """Implementation of class:`SpanProcessor` that forwards all received events to a list of span processors sequentially. The underlying span processors are called in sequential order as they were added. """ _span_processors: Tuple[SpanProcessor, ...] def __init__(self): # use a tuple to avoid race conditions when adding a new span and # iterating through it on "on_start" and "on_end". self._span_processors = () self._lock = threading.Lock() def add_span_processor(self, span_processor: SpanProcessor) -> None: """Adds a SpanProcessor to the list handled by this instance.""" with self._lock: self._span_processors += (span_processor,) def on_start( self, span: "Span", parent_context: Optional[context_api.Context] = None, ) -> None: for sp in self._span_processors: sp.on_start(span, parent_context=parent_context) def on_end(self, span: "ReadableSpan") -> None: for sp in self._span_processors: sp.on_end(span) def shutdown(self) -> None: """Sequentially shuts down all underlying span processors.""" for sp in self._span_processors: sp.shutdown() def force_flush(self, timeout_millis: int = 30000) -> bool: """Sequentially calls force_flush on all underlying :class:`SpanProcessor` Args: timeout_millis: The maximum amount of time over all span processors to wait for spans to be exported. In case the first n span processors exceeded the timeout followup span processors will be skipped. Returns: True if all span processors flushed their spans within the given timeout, False otherwise. """ deadline_ns = time_ns() + timeout_millis * 1000000 for sp in self._span_processors: current_time_ns = time_ns() if current_time_ns >= deadline_ns: return False if not sp.force_flush((deadline_ns - current_time_ns) // 1000000): return False return True class ConcurrentMultiSpanProcessor(SpanProcessor): """Implementation of :class:`SpanProcessor` that forwards all received events to a list of span processors in parallel. Calls to the underlying span processors are forwarded in parallel by submitting them to a thread pool executor and waiting until each span processor finished its work. Args: num_threads: The number of threads managed by the thread pool executor and thus defining how many span processors can work in parallel. """ def __init__(self, num_threads: int = 2): # use a tuple to avoid race conditions when adding a new span and # iterating through it on "on_start" and "on_end". self._span_processors = () # type: Tuple[SpanProcessor, ...] 
self._lock = threading.Lock() self._executor = concurrent.futures.ThreadPoolExecutor( max_workers=num_threads ) def add_span_processor(self, span_processor: SpanProcessor) -> None: """Adds a SpanProcessor to the list handled by this instance.""" with self._lock: self._span_processors += (span_processor,) def _submit_and_await( self, func: Callable[[SpanProcessor], Callable[..., None]], *args: Any, **kwargs: Any, ): futures = [] for sp in self._span_processors: future = self._executor.submit(func(sp), *args, **kwargs) futures.append(future) for future in futures: future.result() def on_start( self, span: "Span", parent_context: Optional[context_api.Context] = None, ) -> None: self._submit_and_await( lambda sp: sp.on_start, span, parent_context=parent_context ) def on_end(self, span: "ReadableSpan") -> None: self._submit_and_await(lambda sp: sp.on_end, span) def shutdown(self) -> None: """Shuts down all underlying span processors in parallel.""" self._submit_and_await(lambda sp: sp.shutdown) def force_flush(self, timeout_millis: int = 30000) -> bool: """Calls force_flush on all underlying span processors in parallel. Args: timeout_millis: The maximum amount of time to wait for spans to be exported. Returns: True if all span processors flushed their spans within the given timeout, False otherwise. """ futures = [] for sp in self._span_processors: # type: SpanProcessor future = self._executor.submit(sp.force_flush, timeout_millis) futures.append(future) timeout_sec = timeout_millis / 1e3 done_futures, not_done_futures = concurrent.futures.wait( futures, timeout_sec ) if not_done_futures: return False for future in done_futures: if not future.result(): return False return True class EventBase(abc.ABC): def __init__(self, name: str, timestamp: Optional[int] = None) -> None: self._name = name if timestamp is None: self._timestamp = time_ns() else: self._timestamp = timestamp @property def name(self) -> str: return self._name @property def timestamp(self) -> int: return self._timestamp @property @abc.abstractmethod def attributes(self) -> types.Attributes: pass class Event(EventBase): """A text annotation with a set of attributes. The attributes of an event are immutable. Args: name: Name of the event. attributes: Attributes of the event. timestamp: Timestamp of the event. If `None`, it will be filled automatically. """ def __init__( self, name: str, attributes: types.Attributes = None, timestamp: Optional[int] = None, limit: Optional[int] = _DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, ) -> None: super().__init__(name, timestamp) self._attributes = attributes @property def attributes(self) -> types.Attributes: return self._attributes @property def dropped_attributes(self) -> int: if isinstance(self._attributes, BoundedAttributes): return self._attributes.dropped return 0 def _check_span_ended(func): def wrapper(self, *args, **kwargs): already_ended = False with self._lock: # pylint: disable=protected-access if self._end_time is None: # pylint: disable=protected-access func(self, *args, **kwargs) else: already_ended = True if already_ended: logger.warning("Tried calling %s on an ended span.", func.__name__) return wrapper def _is_valid_link(context: SpanContext, attributes: types.Attributes) -> bool: return bool( context and (context.is_valid or (attributes or context.trace_state)) ) class ReadableSpan: """Provides read-only access to span attributes. Users should NOT be creating these objects directly. `ReadableSpan`s are created as a direct result from using the tracing pipeline via the `Tracer`.
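A common place to receive one is a `SpanProcessor.on_end` callback, which is handed the immutable `ReadableSpan` snapshot of the span that just ended.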
""" def __init__( self, name: str, context: Optional[trace_api.SpanContext] = None, parent: Optional[trace_api.SpanContext] = None, resource: Optional[Resource] = None, attributes: types.Attributes = None, events: Sequence[Event] = (), links: Sequence[trace_api.Link] = (), kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL, instrumentation_info: Optional[InstrumentationInfo] = None, status: Status = Status(StatusCode.UNSET), start_time: Optional[int] = None, end_time: Optional[int] = None, instrumentation_scope: Optional[InstrumentationScope] = None, ) -> None: self._name = name self._context = context self._kind = kind self._instrumentation_info = instrumentation_info self._instrumentation_scope = instrumentation_scope self._parent = parent self._start_time = start_time self._end_time = end_time self._attributes = attributes self._events = events self._links = links if resource is None: self._resource = Resource.create({}) else: self._resource = resource self._status = status @property def dropped_attributes(self) -> int: if isinstance(self._attributes, BoundedAttributes): return self._attributes.dropped return 0 @property def dropped_events(self) -> int: if isinstance(self._events, BoundedList): return self._events.dropped return 0 @property def dropped_links(self) -> int: if isinstance(self._links, BoundedList): return self._links.dropped return 0 @property def name(self) -> str: return self._name def get_span_context(self): return self._context @property def context(self): return self._context @property def kind(self) -> trace_api.SpanKind: return self._kind @property def parent(self) -> Optional[trace_api.SpanContext]: return self._parent @property def start_time(self) -> Optional[int]: return self._start_time @property def end_time(self) -> Optional[int]: return self._end_time @property def status(self) -> trace_api.Status: return self._status @property def attributes(self) -> types.Attributes: return MappingProxyType(self._attributes or {}) @property def events(self) -> Sequence[Event]: return tuple(event for event in self._events) @property def links(self) -> Sequence[trace_api.Link]: return tuple(link for link in self._links) @property def resource(self) -> Resource: return self._resource @property @deprecated( "You should use instrumentation_scope. Deprecated since version 1.11.1." 
) def instrumentation_info(self) -> Optional[InstrumentationInfo]: return self._instrumentation_info @property def instrumentation_scope(self) -> Optional[InstrumentationScope]: return self._instrumentation_scope def to_json(self, indent: Optional[int] = 4): parent_id = None if self.parent is not None: parent_id = f"0x{trace_api.format_span_id(self.parent.span_id)}" start_time = None if self._start_time: start_time = util.ns_to_iso_str(self._start_time) end_time = None if self._end_time: end_time = util.ns_to_iso_str(self._end_time) status = { "status_code": str(self._status.status_code.name), } if self._status.description: status["description"] = self._status.description f_span = { "name": self._name, "context": ( self._format_context(self._context) if self._context else None ), "kind": str(self.kind), "parent_id": parent_id, "start_time": start_time, "end_time": end_time, "status": status, "attributes": self._format_attributes(self._attributes), "events": self._format_events(self._events), "links": self._format_links(self._links), "resource": json.loads(self.resource.to_json()), } return json.dumps(f_span, indent=indent) @staticmethod def _format_context(context: SpanContext) -> Dict[str, str]: return { "trace_id": f"0x{trace_api.format_trace_id(context.trace_id)}", "span_id": f"0x{trace_api.format_span_id(context.span_id)}", "trace_state": repr(context.trace_state), } @staticmethod def _format_attributes( attributes: types.Attributes, ) -> Optional[Dict[str, Any]]: if attributes is not None and not isinstance(attributes, dict): return dict(attributes) return attributes @staticmethod def _format_events(events: Sequence[Event]) -> List[Dict[str, Any]]: return [ { "name": event.name, "timestamp": util.ns_to_iso_str(event.timestamp), "attributes": Span._format_attributes( # pylint: disable=protected-access event.attributes ), } for event in events ] @staticmethod def _format_links(links: Sequence[trace_api.Link]) -> List[Dict[str, Any]]: return [ { "context": Span._format_context( # pylint: disable=protected-access link.context ), "attributes": Span._format_attributes( # pylint: disable=protected-access link.attributes ), } for link in links ] class SpanLimits: """The limits that should be enforced on recorded data such as events, links, attributes etc. This class does not enforce any limits itself. It only provides a way to read limits from env, default values and from user provided arguments. All limit arguments must be either a non-negative integer, ``None`` or ``SpanLimits.UNSET``. - All limit arguments are optional. - If a limit argument is not set, the class will try to read its value from the corresponding environment variable. - If the environment variable is not set, the default value, if any, will be used. Limit precedence: - If a model specific limit is set, it will be used. - Else if the corresponding global limit is set, it will be used. - Else if the model specific limit has a default value, the default value will be used. - Else if the global limit has a default value, the default value will be used. Args: max_attributes: Maximum number of attributes that can be added to a span, event, and link. Environment variable: OTEL_ATTRIBUTE_COUNT_LIMIT Default: {_DEFAULT_ATTRIBUTE_COUNT_LIMIT} max_events: Maximum number of events that can be added to a Span. Environment variable: OTEL_SPAN_EVENT_COUNT_LIMIT Default: {_DEFAULT_SPAN_EVENT_COUNT_LIMIT} max_links: Maximum number of links that can be added to a Span.
Environment variable: OTEL_SPAN_LINK_COUNT_LIMIT Default: {_DEFAULT_SPAN_LINK_COUNT_LIMIT} max_span_attributes: Maximum number of attributes that can be added to a Span. Environment variable: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT Default: {_DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT} max_event_attributes: Maximum number of attributes that can be added to an Event. Default: {_DEFAULT_OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT} max_link_attributes: Maximum number of attributes that can be added to a Link. Default: {_DEFAULT_OTEL_LINK_ATTRIBUTE_COUNT_LIMIT} max_attribute_length: Maximum length an attribute value can have. Values longer than the specified length will be truncated. max_span_attribute_length: Maximum length a span attribute value can have. Values longer than the specified length will be truncated. """ UNSET = -1 def __init__( self, max_attributes: Optional[int] = None, max_events: Optional[int] = None, max_links: Optional[int] = None, max_span_attributes: Optional[int] = None, max_event_attributes: Optional[int] = None, max_link_attributes: Optional[int] = None, max_attribute_length: Optional[int] = None, max_span_attribute_length: Optional[int] = None, ): # span events and links count self.max_events = self._from_env_if_absent( max_events, OTEL_SPAN_EVENT_COUNT_LIMIT, _DEFAULT_OTEL_SPAN_EVENT_COUNT_LIMIT, ) self.max_links = self._from_env_if_absent( max_links, OTEL_SPAN_LINK_COUNT_LIMIT, _DEFAULT_OTEL_SPAN_LINK_COUNT_LIMIT, ) # attribute count global_max_attributes = self._from_env_if_absent( max_attributes, OTEL_ATTRIBUTE_COUNT_LIMIT ) self.max_attributes = ( global_max_attributes if global_max_attributes is not None else _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT ) self.max_span_attributes = self._from_env_if_absent( max_span_attributes, OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, ( global_max_attributes if global_max_attributes is not None else _DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT ), ) self.max_event_attributes = self._from_env_if_absent( max_event_attributes, OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT, ( global_max_attributes if global_max_attributes is not None else _DEFAULT_OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT ), ) self.max_link_attributes = self._from_env_if_absent( max_link_attributes, OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, ( global_max_attributes if global_max_attributes is not None else _DEFAULT_OTEL_LINK_ATTRIBUTE_COUNT_LIMIT ), ) # attribute length self.max_attribute_length = self._from_env_if_absent( max_attribute_length, OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, ) self.max_span_attribute_length = self._from_env_if_absent( max_span_attribute_length, OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT, # use global attribute length limit as default self.max_attribute_length, ) def __repr__(self): return f"{type(self).__name__}(max_span_attributes={self.max_span_attributes}, max_events_attributes={self.max_event_attributes}, max_link_attributes={self.max_link_attributes}, max_attributes={self.max_attributes}, max_events={self.max_events}, max_links={self.max_links}, max_attribute_length={self.max_attribute_length})" @classmethod def _from_env_if_absent( cls, value: Optional[int], env_var: str, default: Optional[int] = None ) -> Optional[int]: if value == cls.UNSET: return None err_msg = "{} must be a non-negative integer but got {}" # if no value is provided for the limit, try to load it from env if value is None: # return default value if env var is not set if env_var not in environ: return default str_value = environ.get(env_var, "").strip().lower() if str_value == _ENV_VALUE_UNSET: return None try: value = int(str_value) except 
ValueError: raise ValueError(err_msg.format(env_var, str_value)) if value < 0: raise ValueError(err_msg.format(env_var, value)) return value _UnsetLimits = SpanLimits( max_attributes=SpanLimits.UNSET, max_events=SpanLimits.UNSET, max_links=SpanLimits.UNSET, max_span_attributes=SpanLimits.UNSET, max_event_attributes=SpanLimits.UNSET, max_link_attributes=SpanLimits.UNSET, max_attribute_length=SpanLimits.UNSET, max_span_attribute_length=SpanLimits.UNSET, ) # not removed for backward compat. please use SpanLimits instead. SPAN_ATTRIBUTE_COUNT_LIMIT = SpanLimits._from_env_if_absent( # pylint: disable=protected-access None, OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, _DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, ) class Span(trace_api.Span, ReadableSpan): """See `opentelemetry.trace.Span`. Users should create `Span` objects via the `Tracer` instead of this constructor. Args: name: The name of the operation this span represents context: The immutable span context parent: This span's parent's `opentelemetry.trace.SpanContext`, or None if this is a root span sampler: The sampler used to create this span trace_config: TODO resource: Entity producing telemetry attributes: The span's attributes to be exported events: Timestamped events to be exported links: Links to other spans to be exported span_processor: `SpanProcessor` to invoke when starting and ending this `Span`. limits: `SpanLimits` instance that was passed to the `TracerProvider` """ def __new__(cls, *args, **kwargs): if cls is Span: raise TypeError("Span must be instantiated via a tracer.") return super().__new__(cls) # pylint: disable=too-many-locals def __init__( self, name: str, context: trace_api.SpanContext, parent: Optional[trace_api.SpanContext] = None, sampler: Optional[sampling.Sampler] = None, trace_config: None = None, # TODO resource: Optional[Resource] = None, attributes: types.Attributes = None, events: Optional[Sequence[Event]] = None, links: Sequence[trace_api.Link] = (), kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL, span_processor: SpanProcessor = SpanProcessor(), instrumentation_info: Optional[InstrumentationInfo] = None, record_exception: bool = True, set_status_on_exception: bool = True, limits=_UnsetLimits, instrumentation_scope: Optional[InstrumentationScope] = None, ) -> None: if resource is None: resource = Resource.create({}) super().__init__( name=name, context=context, parent=parent, kind=kind, resource=resource, instrumentation_info=instrumentation_info, instrumentation_scope=instrumentation_scope, ) self._sampler = sampler self._trace_config = trace_config self._record_exception = record_exception self._set_status_on_exception = set_status_on_exception self._span_processor = span_processor self._limits = limits self._lock = threading.Lock() self._attributes = BoundedAttributes( self._limits.max_span_attributes, attributes, immutable=False, max_value_len=self._limits.max_span_attribute_length, ) self._events = self._new_events() if events: for event in events: event._attributes = BoundedAttributes( self._limits.max_event_attributes, event.attributes, max_value_len=self._limits.max_attribute_length, ) self._events.append(event) self._links = self._new_links(links) def __repr__(self): return f'{type(self).__name__}(name="{self._name}", context={self._context})' def _new_events(self): return BoundedList(self._limits.max_events) def _new_links(self, links: Sequence[trace_api.Link]): if not links: return BoundedList(self._limits.max_links) valid_links = [] for link in links: if link and _is_valid_link(link.context, 
link.attributes): # pylint: disable=protected-access link._attributes = BoundedAttributes( self._limits.max_link_attributes, link.attributes, max_value_len=self._limits.max_attribute_length, ) valid_links.append(link) return BoundedList.from_seq(self._limits.max_links, valid_links) def get_span_context(self): return self._context def set_attributes( self, attributes: Mapping[str, types.AttributeValue] ) -> None: with self._lock: if self._end_time is not None: logger.warning("Setting attribute on ended span.") return for key, value in attributes.items(): self._attributes[key] = value def set_attribute(self, key: str, value: types.AttributeValue) -> None: return self.set_attributes({key: value}) @_check_span_ended def _add_event(self, event: EventBase) -> None: self._events.append(event) def add_event( self, name: str, attributes: types.Attributes = None, timestamp: Optional[int] = None, ) -> None: attributes = BoundedAttributes( self._limits.max_event_attributes, attributes, max_value_len=self._limits.max_attribute_length, ) self._add_event( Event( name=name, attributes=attributes, timestamp=timestamp, ) ) @_check_span_ended def _add_link(self, link: trace_api.Link) -> None: self._links.append(link) def add_link( self, context: SpanContext, attributes: types.Attributes = None, ) -> None: if not _is_valid_link(context, attributes): return attributes = BoundedAttributes( self._limits.max_link_attributes, attributes, max_value_len=self._limits.max_attribute_length, ) self._add_link( trace_api.Link( context=context, attributes=attributes, ) ) def _readable_span(self) -> ReadableSpan: return ReadableSpan( name=self._name, context=self._context, parent=self._parent, resource=self._resource, attributes=self._attributes, events=self._events, links=self._links, kind=self.kind, status=self._status, start_time=self._start_time, end_time=self._end_time, instrumentation_info=self._instrumentation_info, instrumentation_scope=self._instrumentation_scope, ) def start( self, start_time: Optional[int] = None, parent_context: Optional[context_api.Context] = None, ) -> None: with self._lock: if self._start_time is not None: logger.warning("Calling start() on a started span.") return self._start_time = ( start_time if start_time is not None else time_ns() ) self._span_processor.on_start(self, parent_context=parent_context) def end(self, end_time: Optional[int] = None) -> None: with self._lock: if self._start_time is None: raise RuntimeError("Calling end() on a not started span.") if self._end_time is not None: logger.warning("Calling end() on an ended span.") return self._end_time = end_time if end_time is not None else time_ns() self._span_processor.on_end(self._readable_span()) @_check_span_ended def update_name(self, name: str) -> None: self._name = name def is_recording(self) -> bool: return self._end_time is None @_check_span_ended def set_status( self, status: typing.Union[Status, StatusCode], description: typing.Optional[str] = None, ) -> None: # Ignore future calls if status is already set to OK # Ignore calls to set to StatusCode.UNSET if isinstance(status, Status): if ( self._status and self._status.status_code is StatusCode.OK or status.status_code is StatusCode.UNSET ): return if description is not None: logger.warning( "Description %s ignored. 
Use either `Status` or `(StatusCode, Description)`", description, ) self._status = status elif isinstance(status, StatusCode): if ( self._status and self._status.status_code is StatusCode.OK or status is StatusCode.UNSET ): return self._status = Status(status, description) def __exit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: """Ends context manager and calls `end` on the `Span`.""" if exc_val is not None and self.is_recording(): # Record the exception as an event # pylint:disable=protected-access if self._record_exception: self.record_exception(exception=exc_val, escaped=True) # Records status if span is used as context manager # i.e. with tracer.start_span() as span: if self._set_status_on_exception: self.set_status( Status( status_code=StatusCode.ERROR, description=f"{exc_type.__name__}: {exc_val}", ) ) super().__exit__(exc_type, exc_val, exc_tb) def record_exception( self, exception: BaseException, attributes: types.Attributes = None, timestamp: Optional[int] = None, escaped: bool = False, ) -> None: """Records an exception as a span event.""" # TODO: keep only exception as first argument after baseline is 3.10 stacktrace = "".join( traceback.format_exception( type(exception), value=exception, tb=exception.__traceback__ ) ) module = type(exception).__module__ qualname = type(exception).__qualname__ exception_type = ( f"{module}.{qualname}" if module and module != "builtins" else qualname ) _attributes: MutableMapping[str, types.AttributeValue] = { EXCEPTION_TYPE: exception_type, EXCEPTION_MESSAGE: str(exception), EXCEPTION_STACKTRACE: stacktrace, EXCEPTION_ESCAPED: str(escaped), } if attributes: _attributes.update(attributes) self.add_event( name="exception", attributes=_attributes, timestamp=timestamp ) class _Span(Span): """Protected implementation of `opentelemetry.trace.Span`. This constructor exists to prevent the instantiation of the `Span` class by other mechanisms than through the `Tracer`. 
""" class Tracer(trace_api.Tracer): """See `opentelemetry.trace.Tracer`.""" def __init__( self, sampler: sampling.Sampler, resource: Resource, span_processor: Union[ SynchronousMultiSpanProcessor, ConcurrentMultiSpanProcessor ], id_generator: IdGenerator, instrumentation_info: InstrumentationInfo, span_limits: SpanLimits, instrumentation_scope: InstrumentationScope, ) -> None: self.sampler = sampler self.resource = resource self.span_processor = span_processor self.id_generator = id_generator self.instrumentation_info = instrumentation_info self._span_limits = span_limits self._instrumentation_scope = instrumentation_scope @_agnosticcontextmanager # pylint: disable=protected-access def start_as_current_span( self, name: str, context: Optional[context_api.Context] = None, kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL, attributes: types.Attributes = None, links: Optional[Sequence[trace_api.Link]] = (), start_time: Optional[int] = None, record_exception: bool = True, set_status_on_exception: bool = True, end_on_exit: bool = True, ) -> Iterator[trace_api.Span]: span = self.start_span( name=name, context=context, kind=kind, attributes=attributes, links=links, start_time=start_time, record_exception=record_exception, set_status_on_exception=set_status_on_exception, ) with trace_api.use_span( span, end_on_exit=end_on_exit, record_exception=record_exception, set_status_on_exception=set_status_on_exception, ) as span: yield span def start_span( # pylint: disable=too-many-locals self, name: str, context: Optional[context_api.Context] = None, kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL, attributes: types.Attributes = None, links: Optional[Sequence[trace_api.Link]] = (), start_time: Optional[int] = None, record_exception: bool = True, set_status_on_exception: bool = True, ) -> trace_api.Span: parent_span_context = trace_api.get_current_span( context ).get_span_context() if parent_span_context is not None and not isinstance( parent_span_context, trace_api.SpanContext ): raise TypeError( "parent_span_context must be a SpanContext or None." ) # is_valid determines root span if parent_span_context is None or not parent_span_context.is_valid: parent_span_context = None trace_id = self.id_generator.generate_trace_id() else: trace_id = parent_span_context.trace_id # The sampler decides whether to create a real or no-op span at the # time of span creation. No-op spans do not record events, and are not # exported. # The sampler may also add attributes to the newly-created span, e.g. # to include information about the sampling result. 
# The sampler may also modify the parent span context's tracestate sampling_result = self.sampler.should_sample( context, trace_id, name, kind, attributes, links ) trace_flags = ( trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED) if sampling_result.decision.is_sampled() else trace_api.TraceFlags(trace_api.TraceFlags.DEFAULT) ) span_context = trace_api.SpanContext( trace_id, self.id_generator.generate_span_id(), is_remote=False, trace_flags=trace_flags, trace_state=sampling_result.trace_state, ) # Only record if is_recording() is true if sampling_result.decision.is_recording(): # pylint:disable=protected-access span = _Span( name=name, context=span_context, parent=parent_span_context, sampler=self.sampler, resource=self.resource, attributes=sampling_result.attributes.copy(), span_processor=self.span_processor, kind=kind, links=links, instrumentation_info=self.instrumentation_info, record_exception=record_exception, set_status_on_exception=set_status_on_exception, limits=self._span_limits, instrumentation_scope=self._instrumentation_scope, ) span.start(start_time=start_time, parent_context=context) else: span = trace_api.NonRecordingSpan(context=span_context) return span class TracerProvider(trace_api.TracerProvider): """See `opentelemetry.trace.TracerProvider`.""" def __init__( self, sampler: Optional[sampling.Sampler] = None, resource: Optional[Resource] = None, shutdown_on_exit: bool = True, active_span_processor: Union[ SynchronousMultiSpanProcessor, ConcurrentMultiSpanProcessor, None ] = None, id_generator: Optional[IdGenerator] = None, span_limits: Optional[SpanLimits] = None, ) -> None: self._active_span_processor = ( active_span_processor or SynchronousMultiSpanProcessor() ) if id_generator is None: self.id_generator = RandomIdGenerator() else: self.id_generator = id_generator if resource is None: self._resource = Resource.create({}) else: self._resource = resource if not sampler: sampler = sampling._get_from_env_or_default() self.sampler = sampler self._span_limits = span_limits or SpanLimits() disabled = environ.get(OTEL_SDK_DISABLED, "") self._disabled = disabled.lower().strip() == "true" self._atexit_handler = None if shutdown_on_exit: self._atexit_handler = atexit.register(self.shutdown) @property def resource(self) -> Resource: return self._resource def get_tracer( self, instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, attributes: typing.Optional[types.Attributes] = None, ) -> "trace_api.Tracer": if self._disabled: return NoOpTracer() if not instrumenting_module_name: # Reject empty strings too. instrumenting_module_name = "" logger.error("get_tracer called with missing module name.") if instrumenting_library_version is None: instrumenting_library_version = "" filterwarnings( "ignore", message=( r"You should use InstrumentationScope. Deprecated since version 1.11.1." ), category=DeprecationWarning, module="opentelemetry.sdk.trace", ) instrumentation_info = InstrumentationInfo( instrumenting_module_name, instrumenting_library_version, schema_url, ) return Tracer( self.sampler, self.resource, self._active_span_processor, self.id_generator, instrumentation_info, self._span_limits, InstrumentationScope( instrumenting_module_name, instrumenting_library_version, schema_url, attributes, ), ) def add_span_processor(self, span_processor: SpanProcessor) -> None: """Registers a new :class:`SpanProcessor` for this `TracerProvider`. The span processors are invoked in the same order they are registered. 
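For example, to batch and export sampled spans to the console (using the
processor and exporter from ``opentelemetry.sdk.trace.export``):

.. code:: python

    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import (
        BatchSpanProcessor,
        ConsoleSpanExporter,
    )

    provider = TracerProvider()
    provider.add_span_processor(
        BatchSpanProcessor(ConsoleSpanExporter())
    )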
""" # no lock here because add_span_processor is thread safe for both # SynchronousMultiSpanProcessor and ConcurrentMultiSpanProcessor. self._active_span_processor.add_span_processor(span_processor) def shutdown(self) -> None: """Shut down the span processors added to the tracer provider.""" self._active_span_processor.shutdown() if self._atexit_handler is not None: atexit.unregister(self._atexit_handler) self._atexit_handler = None def force_flush(self, timeout_millis: int = 30000) -> bool: """Requests the active span processor to process all spans that have not yet been processed. By default force flush is called sequentially on all added span processors. This means that span processors further back in the list have less time to flush their spans. To have span processors flush their spans in parallel it is possible to initialize the tracer provider with an instance of `ConcurrentMultiSpanProcessor` at the cost of using multiple threads. Args: timeout_millis: The maximum amount of time to wait for spans to be processed. Returns: False if the timeout is exceeded, True otherwise. """ return self._active_span_processor.force_flush(timeout_millis) python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/_sampling_experimental/000077500000000000000000000000001511654350100335645ustar00rootroot00000000000000__init__.py000066400000000000000000000021301511654350100356120ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/_sampling_experimental# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = [ "ComposableSampler", "SamplingIntent", "composable_always_off", "composable_always_on", "composable_parent_threshold", "composable_traceid_ratio_based", "composite_sampler", ] from ._always_off import composable_always_off from ._always_on import composable_always_on from ._composable import ComposableSampler, SamplingIntent from ._parent_threshold import composable_parent_threshold from ._sampler import composite_sampler from ._traceid_ratio import composable_traceid_ratio_based _always_off.py000066400000000000000000000033521511654350100363530ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/_sampling_experimental# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations from typing import Sequence from opentelemetry.context import Context from opentelemetry.trace import Link, SpanKind, TraceState from opentelemetry.util.types import Attributes from ._composable import ComposableSampler, SamplingIntent from ._util import INVALID_THRESHOLD _intent = SamplingIntent(threshold=INVALID_THRESHOLD, threshold_reliable=False) class _ComposableAlwaysOffSampler(ComposableSampler): def sampling_intent( self, parent_ctx: Context | None, name: str, span_kind: SpanKind | None, attributes: Attributes, links: Sequence[Link] | None, trace_state: TraceState | None = None, ) -> SamplingIntent: return _intent def get_description(self) -> str: return "ComposableAlwaysOff" _always_off = _ComposableAlwaysOffSampler() def composable_always_off() -> ComposableSampler: """Returns a composable sampler that does not sample any span. - Always returns a SamplingIntent with no threshold, indicating all spans should be dropped - Sets threshold_reliable to false - Does not add any attributes """ return _always_off _always_on.py000066400000000000000000000032671511654350100362220ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/_sampling_experimental# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from typing import Sequence from opentelemetry.context import Context from opentelemetry.trace import Link, SpanKind, TraceState from opentelemetry.util.types import Attributes from ._composable import ComposableSampler, SamplingIntent from ._util import MIN_THRESHOLD _intent = SamplingIntent(threshold=MIN_THRESHOLD) class _ComposableAlwaysOnSampler(ComposableSampler): def sampling_intent( self, parent_ctx: Context | None, name: str, span_kind: SpanKind | None, attributes: Attributes, links: Sequence[Link] | None, trace_state: TraceState | None = None, ) -> SamplingIntent: return _intent def get_description(self) -> str: return "ComposableAlwaysOn" _always_on = _ComposableAlwaysOnSampler() def composable_always_on() -> ComposableSampler: """Returns a composable sampler that samples all spans. - Always returns a SamplingIntent with threshold set to sample all spans (threshold = 0) - Sets threshold_reliable to true - Does not add any attributes """ return _always_on _composable.py000066400000000000000000000041111511654350100363370ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/_sampling_experimental# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import Callable, Protocol, Sequence from opentelemetry.context import Context from opentelemetry.trace import Link, SpanKind, TraceState from opentelemetry.util.types import Attributes @dataclass(frozen=True) class SamplingIntent: """Information to make a consistent sampling decision.""" threshold: int """The sampling threshold value. A lower threshold increases the likelihood of sampling.""" threshold_reliable: bool = field(default=True) """Indicates whether the threshold is reliable for Span-to-Metrics estimation.""" attributes: Attributes = field(default=None) """Any attributes to be added to a sampled span.""" update_trace_state: Callable[[TraceState], TraceState] = field( default=lambda ts: ts ) """Any updates to be made to trace state.""" class ComposableSampler(Protocol): """A sampler that can be composed to make a final sampling decision.""" def sampling_intent( self, parent_ctx: Context | None, name: str, span_kind: SpanKind | None, attributes: Attributes, links: Sequence[Link] | None, trace_state: TraceState | None, ) -> SamplingIntent: """Returns information to make a sampling decision.""" ... # pylint: disable=unnecessary-ellipsis def get_description(self) -> str: """Returns a description of the sampler.""" ... # pylint: disable=unnecessary-ellipsis _parent_threshold.py000066400000000000000000000064121511654350100375660ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/_sampling_experimental# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
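# Illustrative sketch (not part of the SDK): a custom sampler satisfying the
# ComposableSampler protocol defined in _composable.py above. Since
# ComposableSampler is a typing.Protocol, structural typing suffices and no
# inheritance is needed. The name SpanNamePrefixDropper and the hard-coded
# threshold are assumptions for this example; -1 mirrors the internal
# INVALID_THRESHOLD constant from _util.py ("no threshold", i.e. drop).
from opentelemetry.sdk.trace._sampling_experimental import (
    ComposableSampler,
    SamplingIntent,
)

_DROP_INTENT = SamplingIntent(threshold=-1, threshold_reliable=False)


class SpanNamePrefixDropper:
    """Drops spans whose name starts with `prefix`; otherwise defers to a
    delegate composable sampler."""

    def __init__(self, prefix: str, delegate: ComposableSampler):
        self._prefix = prefix
        self._delegate = delegate

    def sampling_intent(
        self, parent_ctx, name, span_kind, attributes, links, trace_state=None
    ) -> SamplingIntent:
        # Force a drop for matching span names, without consulting the delegate.
        if name.startswith(self._prefix):
            return _DROP_INTENT
        return self._delegate.sampling_intent(
            parent_ctx, name, span_kind, attributes, links, trace_state
        )

    def get_description(self) -> str:
        return f"SpanNamePrefixDropper{{prefix={self._prefix}}}"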
from __future__ import annotations from typing import Sequence from opentelemetry.context import Context from opentelemetry.trace import Link, SpanKind, TraceState, get_current_span from opentelemetry.util.types import Attributes from ._composable import ComposableSampler, SamplingIntent from ._trace_state import OtelTraceState from ._util import ( INVALID_THRESHOLD, MIN_THRESHOLD, is_valid_threshold, ) class _ComposableParentThreshold(ComposableSampler): def __init__(self, root_sampler: ComposableSampler): self._root_sampler = root_sampler self._description = f"ComposableParentThreshold{{root={root_sampler.get_description()}}}" def sampling_intent( self, parent_ctx: Context | None, name: str, span_kind: SpanKind | None, attributes: Attributes, links: Sequence[Link] | None, trace_state: TraceState | None = None, ) -> SamplingIntent: parent_span = get_current_span(parent_ctx) parent_span_ctx = parent_span.get_span_context() is_root = not parent_span_ctx.is_valid if is_root: return self._root_sampler.sampling_intent( parent_ctx, name, span_kind, attributes, links, trace_state ) ot_trace_state = OtelTraceState.parse(trace_state) if is_valid_threshold(ot_trace_state.threshold): return SamplingIntent( threshold=ot_trace_state.threshold, threshold_reliable=True, ) threshold = ( MIN_THRESHOLD if parent_span_ctx.trace_flags.sampled else INVALID_THRESHOLD ) return SamplingIntent(threshold=threshold, threshold_reliable=False) def get_description(self) -> str: return self._description def composable_parent_threshold( root_sampler: ComposableSampler, ) -> ComposableSampler: """Returns a consistent sampler that respects the sampling decision of the parent span or falls back to the given sampler if it is a root span. - For spans without a parent context, delegates to the root sampler - For spans with a parent context, returns a SamplingIntent that propagates the parent's sampling decision - Returns the parent's threshold if available; otherwise, if the parent's sampled flag is set, returns threshold=0; if it is not set, no threshold is returned. - Sets threshold_reliable to match the parent's reliability, which is true if the parent had a threshold. - Does not add any attributes Args: root_sampler: The root sampler to use for spans without a parent context. """ return _ComposableParentThreshold(root_sampler) _sampler.py000066400000000000000000000067131511654350100356700ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/_sampling_experimental# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
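# Worked example for the parent-threshold logic above (values per the
# OpenTelemetry tracestate probability-sampling spec): a sampled parent
# arriving with `tracestate: ot=th:8` carries threshold 0x80000000000000
# ("8" padded with 13 trailing zero hex digits, i.e. 2**55), a 50% sampling
# probability; the child reuses that threshold with threshold_reliable=True.
# A sampled parent without a `th` field yields threshold 0 (always sample)
# and an unsampled parent yields no threshold (drop); in both of those cases
# threshold_reliable is False.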
from __future__ import annotations from typing import Sequence from opentelemetry.context import Context from opentelemetry.sdk.trace.sampling import Decision, Sampler, SamplingResult from opentelemetry.trace import Link, SpanKind, TraceState from opentelemetry.util.types import Attributes from ._composable import ComposableSampler, SamplingIntent from ._trace_state import OTEL_TRACE_STATE_KEY, OtelTraceState from ._util import INVALID_THRESHOLD, is_valid_random_value, is_valid_threshold class _CompositeSampler(Sampler): def __init__(self, delegate: ComposableSampler): self._delegate = delegate def should_sample( self, parent_context: Context | None, trace_id: int, name: str, kind: SpanKind | None = None, attributes: Attributes | None = None, links: Sequence[Link] | None = None, trace_state: TraceState | None = None, ) -> SamplingResult: ot_trace_state = OtelTraceState.parse(trace_state) intent = self._delegate.sampling_intent( parent_context, name, kind, attributes, links, trace_state ) threshold = intent.threshold if is_valid_threshold(threshold): adjusted_count_correct = intent.threshold_reliable if is_valid_random_value(ot_trace_state.random_value): randomness = ot_trace_state.random_value else: # Use last 56 bits of trace_id as randomness randomness = trace_id & 0x00FFFFFFFFFFFFFF sampled = threshold <= randomness else: sampled = False adjusted_count_correct = False decision = Decision.RECORD_AND_SAMPLE if sampled else Decision.DROP if sampled and adjusted_count_correct: ot_trace_state.threshold = threshold else: ot_trace_state.threshold = INVALID_THRESHOLD return SamplingResult( decision, intent.attributes, _update_trace_state(trace_state, ot_trace_state, intent), ) def get_description(self) -> str: return self._delegate.get_description() def _update_trace_state( trace_state: TraceState | None, ot_trace_state: OtelTraceState, intent: SamplingIntent, ) -> TraceState | None: otts = ot_trace_state.serialize() if not trace_state: if otts: return TraceState(((OTEL_TRACE_STATE_KEY, otts),)) return None new_trace_state = intent.update_trace_state(trace_state) if otts: return new_trace_state.update(OTEL_TRACE_STATE_KEY, otts) return new_trace_state def composite_sampler(delegate: ComposableSampler) -> Sampler: """A sampler that uses a composable sampler to make its decision while handling tracestate. Args: delegate: The composable sampler to use for making sampling decisions. """ return _CompositeSampler(delegate) _trace_state.py000066400000000000000000000100311511654350100365100ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/_sampling_experimental# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
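# Usage sketch: composite_sampler() above adapts a composable sampler to the
# SDK Sampler interface, so a composition can be passed to TracerProvider like
# any built-in sampler. This is roughly the consistent-probability analogue of
# parentbased_traceidratio at 25%:
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace._sampling_experimental import (
    composable_parent_threshold,
    composable_traceid_ratio_based,
    composite_sampler,
)

sampler = composite_sampler(
    composable_parent_threshold(composable_traceid_ratio_based(0.25))
)
provider = TracerProvider(sampler=sampler)
# For a root span the decision reduces to
#   sampled = threshold <= (trace_id & 0x00FFFFFFFFFFFFFF)
# i.e. the low 56 bits of the trace id act as the randomness value, unless an
# explicit `rv` field is present in the incoming tracestate.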
from __future__ import annotations from dataclasses import dataclass from typing import Sequence from opentelemetry.trace import TraceState from ._util import ( INVALID_RANDOM_VALUE, INVALID_THRESHOLD, MAX_THRESHOLD, is_valid_random_value, is_valid_threshold, ) OTEL_TRACE_STATE_KEY = "ot" _TRACE_STATE_SIZE_LIMIT = 256 _MAX_VALUE_LENGTH = 14 # 56 bits, 4 bits per hex digit @dataclass class OtelTraceState: """Marshals OpenTelemetry tracestate for sampling parameters. https://opentelemetry.io/docs/specs/otel/trace/tracestate-probability-sampling/ """ random_value: int threshold: int rest: Sequence[str] @staticmethod def invalid() -> "OtelTraceState": return OtelTraceState(INVALID_RANDOM_VALUE, INVALID_THRESHOLD, ()) @staticmethod def parse(trace_state: TraceState | None) -> "OtelTraceState": if not trace_state: return OtelTraceState.invalid() ot = trace_state.get(OTEL_TRACE_STATE_KEY, "") if not ot or len(ot) > _TRACE_STATE_SIZE_LIMIT: return OtelTraceState.invalid() threshold = INVALID_THRESHOLD random_value = INVALID_RANDOM_VALUE members = ot.split(";") rest: list[str] | None = None for member in members: if member.startswith("th:"): threshold = _parse_th(member[len("th:") :], INVALID_THRESHOLD) continue if member.startswith("rv:"): random_value = _parse_rv( member[len("rv:") :], INVALID_RANDOM_VALUE ) continue if rest is None: rest = [member] else: rest.append(member) return OtelTraceState( random_value=random_value, threshold=threshold, rest=rest or () ) def serialize(self) -> str: if ( not is_valid_threshold(self.threshold) and not is_valid_random_value(self.random_value) and not self.rest ): return "" parts: list[str] = [] if ( is_valid_threshold(self.threshold) and self.threshold != MAX_THRESHOLD ): parts.append(f"th:{serialize_th(self.threshold)}") if is_valid_random_value(self.random_value): parts.append(f"rv:{_serialize_rv(self.random_value)}") if self.rest: parts.extend(self.rest) res = ";".join(parts) while len(res) > _TRACE_STATE_SIZE_LIMIT: delim_idx = res.rfind(";") if delim_idx == -1: break res = res[:delim_idx] return res def _parse_th(value: str, default: int) -> int: if not value or len(value) > _MAX_VALUE_LENGTH: return default try: parsed = int(value, 16) except ValueError: return default # th value is compressed by removing all trailing zeros, # so we restore them to get the real value. trailing_zeros = _MAX_VALUE_LENGTH - len(value) return parsed << (trailing_zeros * 4) def _parse_rv(value: str, default: int) -> int: if not value or len(value) != _MAX_VALUE_LENGTH: return default try: return int(value, 16) except ValueError: return default def serialize_th(threshold: int) -> str: if not threshold: return "0" return f"{threshold:014x}".rstrip("0") def _serialize_rv(random_value: int) -> str: return f"{random_value:014x}" _traceid_ratio.py000066400000000000000000000051631511654350100370340ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/_sampling_experimental# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from typing import Sequence from opentelemetry.context import Context from opentelemetry.trace import Link, SpanKind, TraceState from opentelemetry.util.types import Attributes from ._composable import ComposableSampler, SamplingIntent from ._trace_state import serialize_th from ._util import INVALID_THRESHOLD, MAX_THRESHOLD, calculate_threshold class ComposableTraceIDRatioBased(ComposableSampler): _threshold: int _description: str def __init__(self, ratio: float): threshold = calculate_threshold(ratio) if threshold == MAX_THRESHOLD: threshold_str = "max" else: threshold_str = serialize_th(threshold) if threshold != MAX_THRESHOLD: intent = SamplingIntent(threshold=threshold) else: intent = SamplingIntent( threshold=INVALID_THRESHOLD, threshold_reliable=False ) self._intent = intent self._description = f"ComposableTraceIDRatioBased{{threshold={threshold_str}, ratio={ratio}}}" def sampling_intent( self, parent_ctx: Context | None, name: str, span_kind: SpanKind | None, attributes: Attributes, links: Sequence[Link] | None, trace_state: TraceState | None, ) -> SamplingIntent: return self._intent def get_description(self) -> str: return self._description def composable_traceid_ratio_based( ratio: float, ) -> ComposableSampler: """Returns a composable sampler that samples each span with a fixed ratio. - Returns a SamplingIntent with threshold determined by the configured sampling ratio - Sets threshold_reliable to true - Does not add any attributes Note: If the ratio is 0, it will behave as an ComposableAlwaysOff sampler instead. Args: ratio: The sampling ratio to use (between 0.0 and 1.0). """ if not 0.0 <= ratio <= 1.0: raise ValueError("Sampling ratio must be between 0.0 and 1.0") return ComposableTraceIDRatioBased(ratio) _util.py000066400000000000000000000022741511654350100352000ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/_sampling_experimental# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
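# Worked examples (spec-defined values) of the ratio -> threshold mapping used
# by composable_traceid_ratio_based above; calculate_threshold below computes
# MAX_THRESHOLD - round(ratio * 2**56) and serialize_th strips trailing zeros:
#
#   ratio 1.0  -> threshold 0                 -> serialized as "th:0" (keep all)
#   ratio 0.5  -> threshold 0x80000000000000  -> "th:8"
#   ratio 0.25 -> threshold 0xc0000000000000  -> "th:c"
#   ratio 0.0  -> threshold MAX_THRESHOLD     -> treated as invalid: drop all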
RANDOM_VALUE_BITS = 56 MAX_THRESHOLD = 1 << RANDOM_VALUE_BITS # 0% sampling MIN_THRESHOLD = 0 # 100% sampling MAX_RANDOM_VALUE = MAX_THRESHOLD - 1 INVALID_THRESHOLD = -1 INVALID_RANDOM_VALUE = -1 _probability_threshold_scale = float.fromhex("0x1p56") def calculate_threshold(sampling_probability: float) -> int: return MAX_THRESHOLD - round( sampling_probability * _probability_threshold_scale ) def is_valid_threshold(threshold: int) -> bool: return MIN_THRESHOLD <= threshold <= MAX_THRESHOLD def is_valid_random_value(random_value: int) -> bool: return 0 <= random_value <= MAX_RANDOM_VALUE python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/000077500000000000000000000000001511654350100303575ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py000066400000000000000000000232471511654350100325000ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import logging import sys import typing from enum import Enum from os import environ, linesep from opentelemetry.context import ( _SUPPRESS_INSTRUMENTATION_KEY, Context, attach, detach, set_value, ) from opentelemetry.sdk._shared_internal import BatchProcessor from opentelemetry.sdk.environment_variables import ( OTEL_BSP_EXPORT_TIMEOUT, OTEL_BSP_MAX_EXPORT_BATCH_SIZE, OTEL_BSP_MAX_QUEUE_SIZE, OTEL_BSP_SCHEDULE_DELAY, ) from opentelemetry.sdk.trace import ReadableSpan, Span, SpanProcessor _DEFAULT_SCHEDULE_DELAY_MILLIS = 5000 _DEFAULT_MAX_EXPORT_BATCH_SIZE = 512 _DEFAULT_EXPORT_TIMEOUT_MILLIS = 30000 _DEFAULT_MAX_QUEUE_SIZE = 2048 _ENV_VAR_INT_VALUE_ERROR_MESSAGE = ( "Unable to parse value for %s as integer. Defaulting to %s." ) logger = logging.getLogger(__name__) class SpanExportResult(Enum): SUCCESS = 0 FAILURE = 1 class SpanExporter: """Interface for exporting spans. Interface to be implemented by services that want to export spans recorded in their own format. To export data this MUST be registered to the :class`opentelemetry.sdk.trace.Tracer` using a `SimpleSpanProcessor` or a `BatchSpanProcessor`. """ def export( self, spans: typing.Sequence[ReadableSpan] ) -> "SpanExportResult": # pyright: ignore[reportReturnType] """Exports a batch of telemetry data. Args: spans: The list of `opentelemetry.trace.Span` objects to be exported Returns: The result of the export """ def shutdown(self) -> None: """Shuts down the exporter. Called when the SDK is shut down. """ def force_flush(self, timeout_millis: int = 30000) -> bool: # pyright: ignore[reportReturnType] """Hint to ensure that the export of any spans the exporter has received prior to the call to ForceFlush SHOULD be completed as soon as possible, preferably before returning from this method. """ class SimpleSpanProcessor(SpanProcessor): """Simple SpanProcessor implementation. SimpleSpanProcessor is an implementation of `SpanProcessor` that passes ended spans directly to the configured `SpanExporter`. 
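For example, paired with the in-memory exporter from
``in_memory_span_exporter.py`` in this package, it gives deterministic,
synchronous exports in tests:

.. code:: python

    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import SimpleSpanProcessor
    from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
        InMemorySpanExporter,
    )

    exporter = InMemorySpanExporter()
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(exporter))

    with provider.get_tracer(__name__).start_as_current_span("test"):
        pass

    assert len(exporter.get_finished_spans()) == 1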
""" def __init__(self, span_exporter: SpanExporter): self.span_exporter = span_exporter def on_start( self, span: Span, parent_context: typing.Optional[Context] = None ) -> None: pass def on_end(self, span: ReadableSpan) -> None: if not (span.context and span.context.trace_flags.sampled): return token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)) try: self.span_exporter.export((span,)) # pylint: disable=broad-exception-caught except Exception: logger.exception("Exception while exporting Span.") detach(token) def shutdown(self) -> None: self.span_exporter.shutdown() def force_flush(self, timeout_millis: int = 30000) -> bool: # pylint: disable=unused-argument return True class BatchSpanProcessor(SpanProcessor): """Batch span processor implementation. `BatchSpanProcessor` is an implementation of `SpanProcessor` that batches ended spans and pushes them to the configured `SpanExporter`. `BatchSpanProcessor` is configurable with the following environment variables which correspond to constructor parameters: - :envvar:`OTEL_BSP_SCHEDULE_DELAY` - :envvar:`OTEL_BSP_MAX_QUEUE_SIZE` - :envvar:`OTEL_BSP_MAX_EXPORT_BATCH_SIZE` - :envvar:`OTEL_BSP_EXPORT_TIMEOUT` All the logic for emitting spans, shutting down etc. resides in the `BatchProcessor` class. """ def __init__( self, span_exporter: SpanExporter, max_queue_size: int | None = None, schedule_delay_millis: float | None = None, max_export_batch_size: int | None = None, export_timeout_millis: float | None = None, ): if max_queue_size is None: max_queue_size = BatchSpanProcessor._default_max_queue_size() if schedule_delay_millis is None: schedule_delay_millis = ( BatchSpanProcessor._default_schedule_delay_millis() ) if max_export_batch_size is None: max_export_batch_size = ( BatchSpanProcessor._default_max_export_batch_size() ) # Not used. No way currently to pass timeout to export. if export_timeout_millis is None: export_timeout_millis = ( BatchSpanProcessor._default_export_timeout_millis() ) BatchSpanProcessor._validate_arguments( max_queue_size, schedule_delay_millis, max_export_batch_size ) self._batch_processor = BatchProcessor( span_exporter, schedule_delay_millis, max_export_batch_size, export_timeout_millis, max_queue_size, "Span", ) # Added for backward compatibility. Not recommended to directly access/use underlying exporter. 
@property def span_exporter(self): return self._batch_processor._exporter # pylint: disable=protected-access def on_start( self, span: Span, parent_context: Context | None = None ) -> None: pass def on_end(self, span: ReadableSpan) -> None: if not (span.context and span.context.trace_flags.sampled): return self._batch_processor.emit(span) def shutdown(self): return self._batch_processor.shutdown() def force_flush(self, timeout_millis: typing.Optional[int] = None) -> bool: return self._batch_processor.force_flush(timeout_millis) @staticmethod def _default_max_queue_size(): try: return int( environ.get(OTEL_BSP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE) ) except ValueError: logger.exception( _ENV_VAR_INT_VALUE_ERROR_MESSAGE, OTEL_BSP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE, ) return _DEFAULT_MAX_QUEUE_SIZE @staticmethod def _default_schedule_delay_millis(): try: return int( environ.get( OTEL_BSP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS ) ) except ValueError: logger.exception( _ENV_VAR_INT_VALUE_ERROR_MESSAGE, OTEL_BSP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS, ) return _DEFAULT_SCHEDULE_DELAY_MILLIS @staticmethod def _default_max_export_batch_size(): try: return int( environ.get( OTEL_BSP_MAX_EXPORT_BATCH_SIZE, _DEFAULT_MAX_EXPORT_BATCH_SIZE, ) ) except ValueError: logger.exception( _ENV_VAR_INT_VALUE_ERROR_MESSAGE, OTEL_BSP_MAX_EXPORT_BATCH_SIZE, _DEFAULT_MAX_EXPORT_BATCH_SIZE, ) return _DEFAULT_MAX_EXPORT_BATCH_SIZE @staticmethod def _default_export_timeout_millis(): try: return int( environ.get( OTEL_BSP_EXPORT_TIMEOUT, _DEFAULT_EXPORT_TIMEOUT_MILLIS ) ) except ValueError: logger.exception( _ENV_VAR_INT_VALUE_ERROR_MESSAGE, OTEL_BSP_EXPORT_TIMEOUT, _DEFAULT_EXPORT_TIMEOUT_MILLIS, ) return _DEFAULT_EXPORT_TIMEOUT_MILLIS @staticmethod def _validate_arguments( max_queue_size, schedule_delay_millis, max_export_batch_size ): if max_queue_size <= 0: raise ValueError("max_queue_size must be a positive integer.") if schedule_delay_millis <= 0: raise ValueError("schedule_delay_millis must be positive.") if max_export_batch_size <= 0: raise ValueError( "max_export_batch_size must be a positive integer." ) if max_export_batch_size > max_queue_size: raise ValueError( "max_export_batch_size must be less than or equal to max_queue_size." ) class ConsoleSpanExporter(SpanExporter): """Implementation of :class:`SpanExporter` that prints spans to the console. This class can be used for diagnostic purposes. It prints the exported spans to the console STDOUT. """ def __init__( self, service_name: str | None = None, out: typing.IO = sys.stdout, formatter: typing.Callable[ [ReadableSpan], str ] = lambda span: span.to_json() + linesep, ): self.out = out self.formatter = formatter self.service_name = service_name def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult: for span in spans: self.out.write(self.formatter(span)) self.out.flush() return SpanExportResult.SUCCESS def force_flush(self, timeout_millis: int = 30000) -> bool: return True in_memory_span_exporter.py000066400000000000000000000041001511654350100356140ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/export# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import threading import typing from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult class InMemorySpanExporter(SpanExporter): """Implementation of :class:`.SpanExporter` that stores spans in memory. This class can be used for testing purposes. It stores the exported spans in a list in memory that can be retrieved using the :func:`.get_finished_spans` method. """ def __init__(self) -> None: self._finished_spans: typing.List[ReadableSpan] = [] self._stopped = False self._lock = threading.Lock() def clear(self) -> None: """Clear list of collected spans.""" with self._lock: self._finished_spans.clear() def get_finished_spans(self) -> typing.Tuple[ReadableSpan, ...]: """Get list of collected spans.""" with self._lock: return tuple(self._finished_spans) def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult: """Stores a list of spans in memory.""" if self._stopped: return SpanExportResult.FAILURE with self._lock: self._finished_spans.extend(spans) return SpanExportResult.SUCCESS def shutdown(self) -> None: """Shut downs the exporter. Calls to export after the exporter has been shut down will fail. """ self._stopped = True def force_flush(self, timeout_millis: int = 30000) -> bool: return True python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/id_generator.py000066400000000000000000000036471511654350100320640ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import random from opentelemetry import trace class IdGenerator(abc.ABC): @abc.abstractmethod def generate_span_id(self) -> int: """Get a new span ID. Returns: A 64-bit int for use as a span ID """ @abc.abstractmethod def generate_trace_id(self) -> int: """Get a new trace ID. Implementations should at least make the 64 least significant bits uniformly random. Samplers like the `TraceIdRatioBased` sampler rely on this randomness to make sampling decisions. See `the specification on TraceIdRatioBased `_. Returns: A 128-bit int for use as a trace ID """ class RandomIdGenerator(IdGenerator): """The default ID generator for TracerProvider which randomly generates all bits when generating IDs. 
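It is used when no ``id_generator`` is passed to the tracer provider, and
can also be set explicitly:

.. code:: python

    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.id_generator import RandomIdGenerator

    provider = TracerProvider(id_generator=RandomIdGenerator())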
""" def generate_span_id(self) -> int: span_id = random.getrandbits(64) while span_id == trace.INVALID_SPAN_ID: span_id = random.getrandbits(64) return span_id def generate_trace_id(self) -> int: trace_id = random.getrandbits(128) while trace_id == trace.INVALID_TRACE_ID: trace_id = random.getrandbits(128) return trace_id python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/trace/sampling.py000066400000000000000000000407541511654350100312340ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ For general information about sampling, see `the specification `_. OpenTelemetry provides two types of samplers: - `StaticSampler` - `TraceIdRatioBased` A `StaticSampler` always returns the same sampling result regardless of the conditions. Both possible StaticSamplers are already created: - Always sample spans: ALWAYS_ON - Never sample spans: ALWAYS_OFF A `TraceIdRatioBased` sampler makes a random sampling result based on the sampling probability given. If the span being sampled has a parent, `ParentBased` will respect the parent delegate sampler. Otherwise, it returns the sampling result from the given root sampler. Currently, sampling results are always made during the creation of the span. However, this might not always be the case in the future (see `OTEP #115 `_). Custom samplers can be created by subclassing `Sampler` and implementing `Sampler.should_sample` as well as `Sampler.get_description`. Samplers are able to modify the `opentelemetry.trace.span.TraceState` of the parent of the span being created. For custom samplers, it is suggested to implement `Sampler.should_sample` to utilize the parent span context's `opentelemetry.trace.span.TraceState` and pass into the `SamplingResult` instead of the explicit trace_state field passed into the parameter of `Sampler.should_sample`. To use a sampler, pass it into the tracer provider constructor. For example: .. code:: python from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( ConsoleSpanExporter, SimpleSpanProcessor, ) from opentelemetry.sdk.trace.sampling import TraceIdRatioBased # sample 1 in every 1000 traces sampler = TraceIdRatioBased(1/1000) # set the sampler onto the global tracer provider trace.set_tracer_provider(TracerProvider(sampler=sampler)) # set up an exporter for sampled spans trace.get_tracer_provider().add_span_processor( SimpleSpanProcessor(ConsoleSpanExporter()) ) # created spans will now be sampled by the TraceIdRatioBased sampler with trace.get_tracer(__name__).start_as_current_span("Test Span"): ... The tracer sampler can also be configured via environment variables ``OTEL_TRACES_SAMPLER`` and ``OTEL_TRACES_SAMPLER_ARG`` (only if applicable). The list of built-in values for ``OTEL_TRACES_SAMPLER`` are: * always_on - Sampler that always samples spans, regardless of the parent span's sampling decision. 
* always_off - Sampler that never samples spans, regardless of the parent span's sampling decision. * traceidratio - Sampler that samples probabilistically based on rate. * parentbased_always_on - (default) Sampler that respects its parent span's sampling decision, but otherwise always samples. * parentbased_always_off - Sampler that respects its parent span's sampling decision, but otherwise never samples. * parentbased_traceidratio - Sampler that respects its parent span's sampling decision, but otherwise samples probabilistically based on rate. Sampling probability can be set with ``OTEL_TRACES_SAMPLER_ARG`` if the sampler is traceidratio or parentbased_traceidratio. Rate must be in the range [0.0,1.0]. When not provided, rate will be set to 1.0 (maximum rate possible). The previous example, but using environment variables instead. Please make sure to set the env ``OTEL_TRACES_SAMPLER=traceidratio`` and ``OTEL_TRACES_SAMPLER_ARG=0.001``. .. code:: python from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( ConsoleSpanExporter, SimpleSpanProcessor, ) trace.set_tracer_provider(TracerProvider()) # set up an exporter for sampled spans trace.get_tracer_provider().add_span_processor( SimpleSpanProcessor(ConsoleSpanExporter()) ) # created spans will now be sampled by the TraceIdRatioBased sampler with rate 1/1000. with trace.get_tracer(__name__).start_as_current_span("Test Span"): ... When utilizing a configurator, you can configure a custom sampler. In order to create a configurable custom sampler, create an entry point for the custom sampler factory method or function under the entry point group, ``opentelemetry_traces_sampler``. The custom sampler factory method must be of type ``Callable[[str], Sampler]``, taking a single string argument and returning a Sampler object. The single input will come from the string value of the ``OTEL_TRACES_SAMPLER_ARG`` environment variable. If ``OTEL_TRACES_SAMPLER_ARG`` is not configured, the input will be an empty string. For example: .. code:: python setup( ... entry_points={ ... "opentelemetry_traces_sampler": [ "custom_sampler_name = path.to.sampler.factory.method:CustomSamplerFactory.get_sampler" ] } ) # ... class CustomRatioSampler(Sampler): def __init__(self, rate): # ... # ... class CustomSamplerFactory: @staticmethod def get_sampler(sampler_argument): try: rate = float(sampler_argument) return CustomRatioSampler(rate) except ValueError: # In case argument is empty string. return CustomRatioSampler(0.5) In order to configure your application with a custom sampler's entry point, set the ``OTEL_TRACES_SAMPLER`` environment variable to the key name of the entry point. For example, to configure the above sampler, set ``OTEL_TRACES_SAMPLER=custom_sampler_name`` and ``OTEL_TRACES_SAMPLER_ARG=0.5``. """ import abc import enum import os from logging import getLogger from types import MappingProxyType from typing import Optional, Sequence # pylint: disable=unused-import from opentelemetry.context import Context from opentelemetry.sdk.environment_variables import ( OTEL_TRACES_SAMPLER, OTEL_TRACES_SAMPLER_ARG, ) from opentelemetry.trace import Link, SpanKind, get_current_span from opentelemetry.trace.span import TraceState from opentelemetry.util.types import Attributes _logger = getLogger(__name__) class Decision(enum.Enum): # IsRecording() == false, span will not be recorded and all events and attributes will be dropped. DROP = 0 # IsRecording() == true, but Sampled flag MUST NOT be set.
RECORD_ONLY = 1 # IsRecording() == true AND Sampled flag` MUST be set. RECORD_AND_SAMPLE = 2 def is_recording(self): return self in (Decision.RECORD_ONLY, Decision.RECORD_AND_SAMPLE) def is_sampled(self): return self is Decision.RECORD_AND_SAMPLE class SamplingResult: """A sampling result as applied to a newly-created Span. Args: decision: A sampling decision based off of whether the span is recorded and the sampled flag in trace flags in the span context. attributes: Attributes to add to the `opentelemetry.trace.Span`. trace_state: The tracestate used for the `opentelemetry.trace.Span`. Could possibly have been modified by the sampler. """ def __repr__(self) -> str: return f"{type(self).__name__}({str(self.decision)}, attributes={str(self.attributes)})" def __init__( self, decision: Decision, attributes: "Attributes" = None, trace_state: Optional["TraceState"] = None, ) -> None: self.decision = decision if attributes is None: self.attributes = MappingProxyType({}) else: self.attributes = MappingProxyType(attributes) self.trace_state = trace_state class Sampler(abc.ABC): @abc.abstractmethod def should_sample( self, parent_context: Optional["Context"], trace_id: int, name: str, kind: Optional[SpanKind] = None, attributes: Attributes = None, links: Optional[Sequence["Link"]] = None, trace_state: Optional["TraceState"] = None, ) -> "SamplingResult": pass @abc.abstractmethod def get_description(self) -> str: pass class StaticSampler(Sampler): """Sampler that always returns the same decision.""" def __init__(self, decision: "Decision") -> None: self._decision = decision def should_sample( self, parent_context: Optional["Context"], trace_id: int, name: str, kind: Optional[SpanKind] = None, attributes: Attributes = None, links: Optional[Sequence["Link"]] = None, trace_state: Optional["TraceState"] = None, ) -> "SamplingResult": if self._decision is Decision.DROP: attributes = None return SamplingResult( self._decision, attributes, _get_parent_trace_state(parent_context), ) def get_description(self) -> str: if self._decision is Decision.DROP: return "AlwaysOffSampler" return "AlwaysOnSampler" ALWAYS_OFF = StaticSampler(Decision.DROP) """Sampler that never samples spans, regardless of the parent span's sampling decision.""" ALWAYS_ON = StaticSampler(Decision.RECORD_AND_SAMPLE) """Sampler that always samples spans, regardless of the parent span's sampling decision.""" class TraceIdRatioBased(Sampler): """ Sampler that makes sampling decisions probabilistically based on `rate`. Args: rate: Probability (between 0 and 1) that a span will be sampled """ def __init__(self, rate: float): if rate < 0.0 or rate > 1.0: raise ValueError("Probability must be in range [0.0, 1.0].") self._rate = rate self._bound = self.get_bound_for_rate(self._rate) # For compatibility with 64 bit trace IDs, the sampler checks the 64 # low-order bits of the trace ID to decide whether to sample a given trace. 
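    # For example, rate 0.25 gives bound = round(0.25 * (TRACE_ID_LIMIT + 1))
    # = 2**62, so a given trace is sampled iff
    # (trace_id & TRACE_ID_LIMIT) < 2**62, i.e. its low 64 bits fall in the
    # lowest quarter of their range.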
TRACE_ID_LIMIT = (1 << 64) - 1 @classmethod def get_bound_for_rate(cls, rate: float) -> int: return round(rate * (cls.TRACE_ID_LIMIT + 1)) @property def rate(self) -> float: return self._rate @property def bound(self) -> int: return self._bound def should_sample( self, parent_context: Optional["Context"], trace_id: int, name: str, kind: Optional[SpanKind] = None, attributes: Attributes = None, links: Optional[Sequence["Link"]] = None, trace_state: Optional["TraceState"] = None, ) -> "SamplingResult": decision = Decision.DROP if trace_id & self.TRACE_ID_LIMIT < self.bound: decision = Decision.RECORD_AND_SAMPLE if decision is Decision.DROP: attributes = None return SamplingResult( decision, attributes, _get_parent_trace_state(parent_context), ) def get_description(self) -> str: return f"TraceIdRatioBased{{{self._rate}}}" class ParentBased(Sampler): """ If a parent is set, applies the respective delegate sampler. Otherwise, uses the root provided at initialization to make a decision. Args: root: Sampler called for spans with no parent (root spans). remote_parent_sampled: Sampler called for a remote sampled parent. remote_parent_not_sampled: Sampler called for a remote parent that is not sampled. local_parent_sampled: Sampler called for a local sampled parent. local_parent_not_sampled: Sampler called for a local parent that is not sampled. """ def __init__( self, root: Sampler, remote_parent_sampled: Sampler = ALWAYS_ON, remote_parent_not_sampled: Sampler = ALWAYS_OFF, local_parent_sampled: Sampler = ALWAYS_ON, local_parent_not_sampled: Sampler = ALWAYS_OFF, ): self._root = root self._remote_parent_sampled = remote_parent_sampled self._remote_parent_not_sampled = remote_parent_not_sampled self._local_parent_sampled = local_parent_sampled self._local_parent_not_sampled = local_parent_not_sampled def should_sample( self, parent_context: Optional["Context"], trace_id: int, name: str, kind: Optional[SpanKind] = None, attributes: Attributes = None, links: Optional[Sequence["Link"]] = None, trace_state: Optional["TraceState"] = None, ) -> "SamplingResult": parent_span_context = get_current_span( parent_context ).get_span_context() # default to the root sampler sampler = self._root # respect the sampling and remote flag of the parent if present if parent_span_context is not None and parent_span_context.is_valid: if parent_span_context.is_remote: if parent_span_context.trace_flags.sampled: sampler = self._remote_parent_sampled else: sampler = self._remote_parent_not_sampled else: if parent_span_context.trace_flags.sampled: sampler = self._local_parent_sampled else: sampler = self._local_parent_not_sampled return sampler.should_sample( parent_context=parent_context, trace_id=trace_id, name=name, kind=kind, attributes=attributes, links=links, ) def get_description(self): return f"ParentBased{{root:{self._root.get_description()},remoteParentSampled:{self._remote_parent_sampled.get_description()},remoteParentNotSampled:{self._remote_parent_not_sampled.get_description()},localParentSampled:{self._local_parent_sampled.get_description()},localParentNotSampled:{self._local_parent_not_sampled.get_description()}}}" DEFAULT_OFF = ParentBased(ALWAYS_OFF) """Sampler that respects its parent span's sampling decision, but otherwise never samples.""" DEFAULT_ON = ParentBased(ALWAYS_ON) """Sampler that respects its parent span's sampling decision, but otherwise always samples.""" class ParentBasedTraceIdRatio(ParentBased): """ Sampler that respects its parent span's sampling decision, but otherwise samples 
probabilistically based on `rate`. """ def __init__(self, rate: float): root = TraceIdRatioBased(rate=rate) super().__init__(root=root) class _AlwaysOff(StaticSampler): def __init__(self, _): super().__init__(Decision.DROP) class _AlwaysOn(StaticSampler): def __init__(self, _): super().__init__(Decision.RECORD_AND_SAMPLE) class _ParentBasedAlwaysOff(ParentBased): def __init__(self, _): super().__init__(ALWAYS_OFF) class _ParentBasedAlwaysOn(ParentBased): def __init__(self, _): super().__init__(ALWAYS_ON) _KNOWN_SAMPLERS = { "always_on": ALWAYS_ON, "always_off": ALWAYS_OFF, "parentbased_always_on": DEFAULT_ON, "parentbased_always_off": DEFAULT_OFF, "traceidratio": TraceIdRatioBased, "parentbased_traceidratio": ParentBasedTraceIdRatio, } def _get_from_env_or_default() -> Sampler: trace_sampler = os.getenv( OTEL_TRACES_SAMPLER, "parentbased_always_on" ).lower() if trace_sampler not in _KNOWN_SAMPLERS: _logger.warning("Couldn't recognize sampler %s.", trace_sampler) trace_sampler = "parentbased_always_on" if trace_sampler in ("traceidratio", "parentbased_traceidratio"): try: rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG, "")) except (ValueError, TypeError): _logger.warning("Could not convert TRACES_SAMPLER_ARG to float.") rate = 1.0 return _KNOWN_SAMPLERS[trace_sampler](rate) return _KNOWN_SAMPLERS[trace_sampler] def _get_parent_trace_state( parent_context: Optional[Context], ) -> Optional["TraceState"]: parent_span_context = get_current_span(parent_context).get_span_context() if parent_span_context is None or not parent_span_context.is_valid: return None return parent_span_context.trace_state python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/util/000077500000000000000000000000001511654350100267155ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/util/__init__.py000066400000000000000000000104621511654350100310310ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import threading from collections import deque from collections.abc import MutableMapping, Sequence from typing import Optional from typing_extensions import deprecated def ns_to_iso_str(nanoseconds): """Get an ISO 8601 string from time_ns value.""" ts = datetime.datetime.fromtimestamp( nanoseconds / 1e9, tz=datetime.timezone.utc ) return ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ") def get_dict_as_key(labels): """Converts a dict to be used as a unique key""" return tuple( sorted( map( lambda kv: ( (kv[0], tuple(kv[1])) if isinstance(kv[1], list) else kv ), labels.items(), ) ) ) class BoundedList(Sequence): """An append only list with a fixed max size. Calls to `append` and `extend` will drop the oldest elements if there is not enough room. 
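    A short usage sketch (behavior follows from the implementation below)::

        bounded = BoundedList(maxlen=2)
        bounded.append(1)
        bounded.append(2)
        bounded.append(3)  # the oldest element, 1, is dropped
        assert list(bounded) == [2, 3]
        assert bounded.dropped == 1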
""" def __init__(self, maxlen: Optional[int]): self.dropped = 0 self._dq = deque(maxlen=maxlen) # type: deque self._lock = threading.Lock() def __repr__(self): return f"{type(self).__name__}({list(self._dq)}, maxlen={self._dq.maxlen})" def __getitem__(self, index): return self._dq[index] def __len__(self): return len(self._dq) def __iter__(self): with self._lock: return iter(deque(self._dq)) def append(self, item): with self._lock: if ( self._dq.maxlen is not None and len(self._dq) == self._dq.maxlen ): self.dropped += 1 self._dq.append(item) def extend(self, seq): with self._lock: if self._dq.maxlen is not None: to_drop = len(seq) + len(self._dq) - self._dq.maxlen if to_drop > 0: self.dropped += to_drop self._dq.extend(seq) @classmethod def from_seq(cls, maxlen, seq): seq = tuple(seq) bounded_list = cls(maxlen) bounded_list.extend(seq) return bounded_list @deprecated("Deprecated since version 1.4.0.") class BoundedDict(MutableMapping): """An ordered dict with a fixed max capacity. Oldest elements are dropped when the dict is full and a new element is added. """ def __init__(self, maxlen: Optional[int]): if maxlen is not None: if not isinstance(maxlen, int): raise ValueError if maxlen < 0: raise ValueError self.maxlen = maxlen self.dropped = 0 self._dict = {} # type: dict self._lock = threading.Lock() # type: threading.Lock def __repr__(self): return ( f"{type(self).__name__}({dict(self._dict)}, maxlen={self.maxlen})" ) def __getitem__(self, key): return self._dict[key] def __setitem__(self, key, value): with self._lock: if self.maxlen is not None and self.maxlen == 0: self.dropped += 1 return if key in self._dict: del self._dict[key] elif self.maxlen is not None and len(self._dict) == self.maxlen: del self._dict[next(iter(self._dict.keys()))] self.dropped += 1 self._dict[key] = value def __delitem__(self, key): del self._dict[key] def __iter__(self): with self._lock: return iter(self._dict.copy()) def __len__(self): return len(self._dict) @classmethod def from_map(cls, maxlen, mapping): mapping = dict(mapping) bounded_dict = cls(maxlen) for key, value in mapping.items(): bounded_dict[key] = value return bounded_dict python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/util/__init__.pyi000066400000000000000000000044561511654350100312100ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import ( Iterable, Iterator, Mapping, MutableMapping, Sequence, TypeVar, overload, ) from opentelemetry.util.types import AttributesAsKey, AttributeValue _T = TypeVar("_T") _KT = TypeVar("_KT") _VT = TypeVar("_VT") def ns_to_iso_str(nanoseconds: int) -> str: ... def get_dict_as_key( labels: Mapping[str, AttributeValue], ) -> AttributesAsKey: ... # pylint: disable=no-self-use class BoundedList(Sequence[_T]): """An append only list with a fixed max size. Calls to `append` and `extend` will drop the oldest elements if there is not enough room. """ dropped: int def __init__(self, maxlen: int): ... 
def insert(self, index: int, value: _T) -> None: ... @overload def __getitem__(self, i: int) -> _T: ... @overload def __getitem__(self, s: slice) -> Sequence[_T]: ... def __len__(self) -> int: ... def append(self, item: _T) -> None: ... def extend(self, seq: Sequence[_T]) -> None: ... @classmethod def from_seq(cls, maxlen: int, seq: Iterable[_T]) -> BoundedList[_T]: ... # pylint: disable=undefined-variable class BoundedDict(MutableMapping[_KT, _VT]): """An ordered dict with a fixed max capacity. Oldest elements are dropped when the dict is full and a new element is added. """ dropped: int def __init__(self, maxlen: int): ... def __getitem__(self, k: _KT) -> _VT: ... def __setitem__(self, k: _KT, v: _VT) -> None: ... def __delitem__(self, v: _KT) -> None: ... def __iter__(self) -> Iterator[_KT]: ... def __len__(self) -> int: ... @classmethod def from_map( cls, maxlen: int, mapping: Mapping[_KT, _VT] ) -> BoundedDict[_KT, _VT]: ... # pylint: disable=undefined-variable python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py000066400000000000000000000114561511654350100325410ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from json import dumps from typing import Optional from typing_extensions import deprecated from opentelemetry.attributes import BoundedAttributes from opentelemetry.util.types import Attributes, _ExtendedAttributes class InstrumentationInfo: """Immutable information about an instrumentation library module. See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these properties. """ __slots__ = ("_name", "_version", "_schema_url") @deprecated( "You should use InstrumentationScope. Deprecated since version 1.11.1." ) def __init__( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, ): self._name = name self._version = version if schema_url is None: schema_url = "" self._schema_url = schema_url def __repr__(self): return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})" def __hash__(self): return hash((self._name, self._version, self._schema_url)) def __eq__(self, value): return type(value) is type(self) and ( self._name, self._version, self._schema_url, ) == (value._name, value._version, value._schema_url) def __lt__(self, value): if type(value) is not type(self): return NotImplemented return (self._name, self._version, self._schema_url) < ( value._name, value._version, value._schema_url, ) @property def schema_url(self) -> Optional[str]: return self._schema_url @property def version(self) -> Optional[str]: return self._version @property def name(self) -> str: return self._name class InstrumentationScope: """A logical unit of the application code with which the emitted telemetry can be associated. See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these properties. 
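    An illustrative construction (the values shown are made up)::

        scope = InstrumentationScope(
            "io.example.library",
            version="1.2.3",
            attributes={"feature.flag": "on"},
        )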
""" __slots__ = ("_name", "_version", "_schema_url", "_attributes") def __init__( self, name: str, version: Optional[str] = None, schema_url: Optional[str] = None, attributes: Optional[_ExtendedAttributes] = None, ) -> None: self._name = name self._version = version if schema_url is None: schema_url = "" self._schema_url = schema_url self._attributes = BoundedAttributes(attributes=attributes) def __repr__(self) -> str: return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url}, {self._attributes})" def __hash__(self) -> int: return hash((self._name, self._version, self._schema_url)) def __eq__(self, value: object) -> bool: if not isinstance(value, InstrumentationScope): return NotImplemented return ( self._name, self._version, self._schema_url, self._attributes, ) == ( value._name, value._version, value._schema_url, value._attributes, ) def __lt__(self, value: object) -> bool: if not isinstance(value, InstrumentationScope): return NotImplemented return ( self._name, self._version, self._schema_url, self._attributes, ) < ( value._name, value._version, value._schema_url, value._attributes, ) @property def schema_url(self) -> Optional[str]: return self._schema_url @property def version(self) -> Optional[str]: return self._version @property def name(self) -> str: return self._name @property def attributes(self) -> Attributes: return self._attributes def to_json(self, indent: Optional[int] = 4) -> str: return dumps( { "name": self._name, "version": self._version, "schema_url": self._schema_url, "attributes": ( dict(self._attributes) if bool(self._attributes) else None ), }, indent=indent, ) python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/version/000077500000000000000000000000001511654350100274255ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/src/opentelemetry/sdk/version/__init__.py000066400000000000000000000011401511654350100315320ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "1.39.1" python-opentelemetry-1.39.1/opentelemetry-sdk/test-requirements.txt000066400000000000000000000005401511654350100257340ustar00rootroot00000000000000asgiref==3.7.2 flaky==3.7.0 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 psutil==5.9.6; sys_platform != 'win32' py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.19.2 -e tests/opentelemetry-test-utils -e opentelemetry-api -e opentelemetry-semantic-conventions -e opentelemetry-sdkpython-opentelemetry-1.39.1/opentelemetry-sdk/tests/000077500000000000000000000000001511654350100226365ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/__init__.py000066400000000000000000000011101511654350100247400ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python-opentelemetry-1.39.1/opentelemetry-sdk/tests/conftest.py000066400000000000000000000021241511654350100250340ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random from os import environ import pytest from opentelemetry.environment_variables import OTEL_PYTHON_CONTEXT def pytest_sessionstart(session): # pylint: disable=unused-argument environ[OTEL_PYTHON_CONTEXT] = "contextvars_context" def pytest_sessionfinish(session): # pylint: disable=unused-argument environ.pop(OTEL_PYTHON_CONTEXT) @pytest.fixture(autouse=True) def random_seed(): # We use random numbers a lot in sampling tests, make sure they are always the same. random.seed(0) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/context/000077500000000000000000000000001511654350100243225ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/context/__init__.py000066400000000000000000000000001511654350100264210ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/context/test_asyncio.py000066400000000000000000000065001511654350100274010ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import unittest from unittest.mock import patch from opentelemetry import context from opentelemetry.context.contextvars_context import ContextVarsRuntimeContext from opentelemetry.sdk import trace from opentelemetry.sdk.trace import export from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( InMemorySpanExporter, ) _SPAN_NAMES = [ "test_span1", "test_span2", "test_span3", "test_span4", "test_span5", ] def stop_loop_when(loop, cond_func, timeout=5.0): """Registers a periodic callback that stops the loop when cond_func() == True. Compatible with both Tornado and asyncio. 
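    The condition is polled every 0.1 seconds via ``loop.call_later`` until it
    becomes true or ``timeout`` seconds have elapsed.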
""" if cond_func() or timeout <= 0.0: loop.stop() return timeout -= 0.1 loop.call_later(0.1, stop_loop_when, loop, cond_func, timeout) class TestAsyncio(unittest.TestCase): async def task(self, name): with self.tracer.start_as_current_span(name): context.set_value("say", "bar") def submit_another_task(self, name): self.loop.create_task(self.task(name)) def setUp(self): self.token = context.attach(context.Context()) self.tracer_provider = trace.TracerProvider() self.tracer = self.tracer_provider.get_tracer(__name__) self.memory_exporter = InMemorySpanExporter() span_processor = export.SimpleSpanProcessor(self.memory_exporter) self.tracer_provider.add_span_processor(span_processor) self.loop = asyncio.get_event_loop() def tearDown(self): context.detach(self.token) @patch( "opentelemetry.context._RUNTIME_CONTEXT", ContextVarsRuntimeContext() ) def test_with_asyncio(self): with self.tracer.start_as_current_span("asyncio_test"): for name in _SPAN_NAMES: self.submit_another_task(name) stop_loop_when( self.loop, lambda: len(self.memory_exporter.get_finished_spans()) >= 5, timeout=5.0, ) self.loop.run_forever() span_list = self.memory_exporter.get_finished_spans() span_names_list = [span.name for span in span_list] expected = [ "test_span1", "test_span2", "test_span3", "test_span4", "test_span5", "asyncio_test", ] self.assertCountEqual(span_names_list, expected) span_names_list.sort() expected.sort() self.assertListEqual(span_names_list, expected) expected_parent = next( span for span in span_list if span.name == "asyncio_test" ) for span in span_list: if span is expected_parent: continue self.assertEqual(span.parent, expected_parent.context) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/error_handler/000077500000000000000000000000001511654350100254645ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/error_handler/__init__.py000066400000000000000000000000001511654350100275630ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/error_handler/test_error_handler.py000066400000000000000000000102521511654350100317230ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from logging import ERROR from unittest import TestCase from unittest.mock import Mock, patch from opentelemetry.sdk.error_handler import ( ErrorHandler, GlobalErrorHandler, logger, ) class TestErrorHandler(TestCase): @patch("opentelemetry.sdk.error_handler.entry_points") def test_default_error_handler(self, mock_entry_points): with self.assertLogs(logger, ERROR): with GlobalErrorHandler(): # pylint: disable=broad-exception-raised raise Exception("some exception") # pylint: disable=no-self-use @patch("opentelemetry.sdk.error_handler.entry_points") def test_plugin_error_handler(self, mock_entry_points): class ZeroDivisionErrorHandler(ErrorHandler, ZeroDivisionError): # pylint: disable=arguments-differ _handle = Mock() class AssertionErrorHandler(ErrorHandler, AssertionError): # pylint: disable=arguments-differ _handle = Mock() mock_entry_point_zero_division_error_handler = Mock() mock_entry_point_zero_division_error_handler.configure_mock( **{"load.return_value": ZeroDivisionErrorHandler} ) mock_entry_point_assertion_error_handler = Mock() mock_entry_point_assertion_error_handler.configure_mock( **{"load.return_value": AssertionErrorHandler} ) mock_entry_points.configure_mock( **{ "return_value": [ mock_entry_point_zero_division_error_handler, mock_entry_point_assertion_error_handler, ] } ) error = ZeroDivisionError() with GlobalErrorHandler(): raise error # pylint: disable=protected-access ZeroDivisionErrorHandler._handle.assert_called_with(error) error = AssertionError() with GlobalErrorHandler(): raise error AssertionErrorHandler._handle.assert_called_with(error) @patch("opentelemetry.sdk.error_handler.entry_points") def test_error_in_handler(self, mock_entry_points): class ErrorErrorHandler(ErrorHandler, ZeroDivisionError): # pylint: disable=arguments-differ def _handle(self, error: Exception): assert False mock_entry_point_error_error_handler = Mock() mock_entry_point_error_error_handler.configure_mock( **{"load.return_value": ErrorErrorHandler} ) mock_entry_points.configure_mock( **{"return_value": [mock_entry_point_error_error_handler]} ) error = ZeroDivisionError() with self.assertLogs(logger, ERROR): with GlobalErrorHandler(): raise error # pylint: disable=no-self-use @patch("opentelemetry.sdk.error_handler.entry_points") def test_plugin_error_handler_context_manager(self, mock_entry_points): mock_error_handler_instance = Mock() class MockErrorHandlerClass(IndexError): def __new__(cls): return mock_error_handler_instance mock_entry_point_error_handler = Mock() mock_entry_point_error_handler.configure_mock( **{"load.return_value": MockErrorHandlerClass} ) mock_entry_points.configure_mock( **{"return_value": [mock_entry_point_error_handler]} ) error = IndexError() with GlobalErrorHandler(): raise error with GlobalErrorHandler(): pass # pylint: disable=protected-access mock_error_handler_instance._handle.assert_called_once_with(error) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/events/000077500000000000000000000000001511654350100241425ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/events/__init__.py000066400000000000000000000011101511654350100262440ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python-opentelemetry-1.39.1/opentelemetry-sdk/tests/events/test_events.py000066400000000000000000000160241511654350100270620ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=protected-access,no-self-use import unittest from unittest.mock import Mock, patch from opentelemetry._events import Event from opentelemetry._logs import SeverityNumber, set_logger_provider from opentelemetry.sdk._events import EventLoggerProvider from opentelemetry.sdk._logs import LoggerProvider from opentelemetry.sdk._logs._internal import Logger, NoOpLogger from opentelemetry.sdk.environment_variables import OTEL_SDK_DISABLED class TestEventLoggerProvider(unittest.TestCase): def test_event_logger_provider(self): logger_provider = LoggerProvider() event_logger_provider = EventLoggerProvider( logger_provider=logger_provider ) self.assertEqual( event_logger_provider._logger_provider, logger_provider, ) def test_event_logger_provider_default(self): logger_provider = LoggerProvider() set_logger_provider(logger_provider) event_logger_provider = EventLoggerProvider() self.assertEqual( event_logger_provider._logger_provider, logger_provider, ) def test_get_event_logger(self): logger_provider = LoggerProvider() event_logger = EventLoggerProvider(logger_provider).get_event_logger( "name", version="version", schema_url="schema_url", attributes={"key": "value"}, ) self.assertTrue( event_logger._logger, Logger, ) logger = event_logger._logger self.assertEqual(logger._instrumentation_scope.name, "name") self.assertEqual(logger._instrumentation_scope.version, "version") self.assertEqual( logger._instrumentation_scope.schema_url, "schema_url" ) self.assertEqual( logger._instrumentation_scope.attributes, {"key": "value"} ) @patch.dict("os.environ", {OTEL_SDK_DISABLED: "true"}) def test_get_event_logger_with_sdk_disabled(self): logger_provider = LoggerProvider() event_logger = EventLoggerProvider(logger_provider).get_event_logger( "name", version="version", schema_url="schema_url", attributes={"key": "value"}, ) self.assertIsInstance(event_logger._logger, NoOpLogger) def test_force_flush(self): logger_provider = Mock() event_logger = EventLoggerProvider(logger_provider) event_logger.force_flush(1000) logger_provider.force_flush.assert_called_once_with(1000) def test_shutdown(self): logger_provider = Mock() event_logger = EventLoggerProvider(logger_provider) event_logger.shutdown() logger_provider.shutdown.assert_called_once() @patch("opentelemetry.sdk._logs._internal.LoggerProvider.get_logger") def test_event_logger(self, logger_mock): logger_provider = 
LoggerProvider() logger_mock_inst = Mock() logger_mock.return_value = logger_mock_inst EventLoggerProvider(logger_provider).get_event_logger( "name", version="version", schema_url="schema_url", attributes={"key": "value"}, ) logger_mock.assert_called_once_with( "name", "version", "schema_url", {"key": "value"} ) @patch("opentelemetry.sdk._events.LogRecord") @patch("opentelemetry.sdk._logs._internal.LoggerProvider.get_logger") def test_event_logger_emit(self, logger_mock, log_record_mock): logger_provider = LoggerProvider() logger_mock_inst = Mock() logger_mock.return_value = logger_mock_inst event_logger = EventLoggerProvider(logger_provider).get_event_logger( "name", version="version", schema_url="schema_url", attributes={"key": "value"}, ) logger_mock.assert_called_once_with( "name", "version", "schema_url", {"key": "value"} ) now = Mock() trace_id = Mock() span_id = Mock() trace_flags = Mock() event = Event( name="test_event", timestamp=now, trace_id=trace_id, span_id=span_id, trace_flags=trace_flags, body="test body", severity_number=SeverityNumber.ERROR, attributes={ "key": "val", "foo": "bar", "event.name": "not this one", }, ) log_record_mock_inst = Mock() log_record_mock.return_value = log_record_mock_inst event_logger.emit(event) log_record_mock.assert_called_once_with( timestamp=now, observed_timestamp=None, trace_id=trace_id, span_id=span_id, trace_flags=trace_flags, severity_text=None, severity_number=SeverityNumber.ERROR, body="test body", attributes={ "key": "val", "foo": "bar", "event.name": "test_event", }, ) logger_mock_inst.emit.assert_called_once_with(log_record_mock_inst) @patch("opentelemetry.sdk._events.LogRecord") @patch("opentelemetry.sdk._logs._internal.LoggerProvider.get_logger") def test_event_logger_emit_sdk_disabled( self, logger_mock, log_record_mock ): logger_provider = LoggerProvider() logger_mock_inst = Mock(spec=NoOpLogger) logger_mock.return_value = logger_mock_inst event_logger = EventLoggerProvider(logger_provider).get_event_logger( "name", version="version", schema_url="schema_url", attributes={"key": "value"}, ) logger_mock.assert_called_once_with( "name", "version", "schema_url", {"key": "value"} ) now = Mock() trace_id = Mock() span_id = Mock() trace_flags = Mock() event = Event( name="test_event", timestamp=now, trace_id=trace_id, span_id=span_id, trace_flags=trace_flags, body="test body", severity_number=SeverityNumber.ERROR, attributes={ "key": "val", "foo": "bar", "event.name": "not this one", }, ) log_record_mock_inst = Mock() log_record_mock.return_value = log_record_mock_inst event_logger.emit(event) logger_mock_inst.emit.assert_not_called() python-opentelemetry-1.39.1/opentelemetry-sdk/tests/logs/000077500000000000000000000000001511654350100236025ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/logs/__init__.py000066400000000000000000000011101511654350100257040ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
python-opentelemetry-1.39.1/opentelemetry-sdk/tests/logs/test_export.py000066400000000000000000000607541511654350100265440ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=protected-access import logging import os import time import unittest from concurrent.futures import ThreadPoolExecutor from sys import version_info from unittest.mock import Mock, patch from pytest import mark from opentelemetry._logs import LogRecord, SeverityNumber from opentelemetry.sdk import trace from opentelemetry.sdk._logs import ( LoggerProvider, LoggingHandler, ReadableLogRecord, ReadWriteLogRecord, ) from opentelemetry.sdk._logs._internal.export import _logger from opentelemetry.sdk._logs.export import ( BatchLogRecordProcessor, ConsoleLogRecordExporter, InMemoryLogRecordExporter, SimpleLogRecordProcessor, ) from opentelemetry.sdk.environment_variables import ( OTEL_BLRP_EXPORT_TIMEOUT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, OTEL_BLRP_MAX_QUEUE_SIZE, OTEL_BLRP_SCHEDULE_DELAY, ) from opentelemetry.sdk.resources import Resource as SDKResource from opentelemetry.sdk.util.instrumentation import InstrumentationScope from opentelemetry.trace import ( NonRecordingSpan, SpanContext, TraceFlags, set_span_in_context, ) from opentelemetry.trace.span import INVALID_SPAN_CONTEXT EMPTY_LOG = ReadWriteLogRecord( log_record=LogRecord(), instrumentation_scope=InstrumentationScope("example", "example"), ) class TestSimpleLogRecordProcessor(unittest.TestCase): def test_simple_log_record_processor_default_level(self): exporter = InMemoryLogRecordExporter() logger_provider = LoggerProvider() logger_provider.add_log_record_processor( SimpleLogRecordProcessor(exporter) ) logger = logging.getLogger("default_level") logger.propagate = False logger.addHandler(LoggingHandler(logger_provider=logger_provider)) logger.warning("Something is wrong") finished_logs = exporter.get_finished_logs() self.assertEqual(len(finished_logs), 1) warning_log_record = finished_logs[0] self.assertEqual( warning_log_record.log_record.body, "Something is wrong" ) self.assertEqual(warning_log_record.log_record.severity_text, "WARN") self.assertEqual( warning_log_record.log_record.severity_number, SeverityNumber.WARN ) self.assertEqual( finished_logs[0].instrumentation_scope.name, "default_level" ) def test_simple_log_record_processor_custom_level(self): exporter = InMemoryLogRecordExporter() logger_provider = LoggerProvider() logger_provider.add_log_record_processor( SimpleLogRecordProcessor(exporter) ) logger = logging.getLogger("custom_level") logger.propagate = False logger.setLevel(logging.ERROR) logger.addHandler(LoggingHandler(logger_provider=logger_provider)) logger.warning("Warning message") logger.debug("Debug message") logger.error("Error message") logger.critical("Critical message") finished_logs = exporter.get_finished_logs() # Make sure only logs with level >= logging.ERROR are recorded self.assertEqual(len(finished_logs), 2) critical_log_record = finished_logs[0] fatal_log_record =
finished_logs[1] self.assertEqual(critical_log_record.log_record.body, "Error message") self.assertEqual(critical_log_record.log_record.severity_text, "ERROR") self.assertEqual( critical_log_record.log_record.severity_number, SeverityNumber.ERROR, ) self.assertEqual(fatal_log_record.log_record.body, "Critical message") self.assertEqual(fatal_log_record.log_record.severity_text, "CRITICAL") self.assertEqual( fatal_log_record.log_record.severity_number, SeverityNumber.FATAL ) self.assertEqual( finished_logs[0].instrumentation_scope.name, "custom_level" ) self.assertEqual( finished_logs[1].instrumentation_scope.name, "custom_level" ) def test_simple_log_record_processor_trace_correlation(self): exporter = InMemoryLogRecordExporter() logger_provider = LoggerProvider() logger_provider.add_log_record_processor( SimpleLogRecordProcessor(exporter) ) logger = logging.getLogger("trace_correlation") logger.propagate = False logger.addHandler(LoggingHandler(logger_provider=logger_provider)) logger.warning("Warning message") finished_logs = exporter.get_finished_logs() self.assertEqual(len(finished_logs), 1) sdk_record = finished_logs[0] self.assertEqual(sdk_record.log_record.body, "Warning message") self.assertEqual(sdk_record.log_record.severity_text, "WARN") self.assertEqual( sdk_record.log_record.severity_number, SeverityNumber.WARN ) self.assertEqual( sdk_record.log_record.trace_id, INVALID_SPAN_CONTEXT.trace_id ) self.assertEqual( sdk_record.log_record.span_id, INVALID_SPAN_CONTEXT.span_id ) self.assertEqual( sdk_record.log_record.trace_flags, INVALID_SPAN_CONTEXT.trace_flags ) self.assertEqual( finished_logs[0].instrumentation_scope.name, "trace_correlation" ) exporter.clear() tracer = trace.TracerProvider().get_tracer(__name__) with tracer.start_as_current_span("test") as span: logger.critical("Critical message within span") finished_logs = exporter.get_finished_logs() sdk_record = finished_logs[0] self.assertEqual( sdk_record.log_record.body, "Critical message within span" ) self.assertEqual(sdk_record.log_record.severity_text, "CRITICAL") self.assertEqual( sdk_record.log_record.severity_number, SeverityNumber.FATAL ) self.assertEqual( finished_logs[0].instrumentation_scope.name, "trace_correlation", ) span_context = span.get_span_context() self.assertEqual( sdk_record.log_record.trace_id, span_context.trace_id ) self.assertEqual( sdk_record.log_record.span_id, span_context.span_id ) self.assertEqual( sdk_record.log_record.trace_flags, span_context.trace_flags ) def test_simple_log_record_processor_shutdown(self): exporter = InMemoryLogRecordExporter() logger_provider = LoggerProvider() logger_provider.add_log_record_processor( SimpleLogRecordProcessor(exporter) ) logger = logging.getLogger("shutdown") logger.propagate = False logger.addHandler(LoggingHandler(logger_provider=logger_provider)) logger.warning("Something is wrong") finished_logs = exporter.get_finished_logs() self.assertEqual(len(finished_logs), 1) warning_log_record = finished_logs[0] self.assertEqual( warning_log_record.log_record.body, "Something is wrong" ) self.assertEqual(warning_log_record.log_record.severity_text, "WARN") self.assertEqual( warning_log_record.log_record.severity_number, SeverityNumber.WARN ) self.assertEqual( finished_logs[0].instrumentation_scope.name, "shutdown" ) exporter.clear() logger_provider.shutdown() logger.warning("Log after shutdown") finished_logs = exporter.get_finished_logs() self.assertEqual(len(finished_logs), 0) def test_simple_log_record_processor_different_msg_types(self): exporter = 
InMemoryLogRecordExporter() log_record_processor = BatchLogRecordProcessor(exporter) provider = LoggerProvider() provider.add_log_record_processor(log_record_processor) logger = logging.getLogger("different_msg_types") logger.addHandler(LoggingHandler(logger_provider=provider)) logger.warning("warning message: %s", "possible upcoming heatwave") logger.error("Very high rise in temperatures across the globe") logger.critical("Temperature hits high 420 C in Hyderabad") logger.warning(["list", "of", "strings"]) logger.error({"key": "value"}) log_record_processor.shutdown() finished_logs = exporter.get_finished_logs() expected = [ ("warning message: possible upcoming heatwave", "WARN"), ("Very high rise in temperatures across the globe", "ERROR"), ( "Temperature hits high 420 C in Hyderabad", "CRITICAL", ), (["list", "of", "strings"], "WARN"), ({"key": "value"}, "ERROR"), ] emitted = [ (item.log_record.body, item.log_record.severity_text) for item in finished_logs ] self.assertEqual(expected, emitted) for item in finished_logs: self.assertEqual( item.instrumentation_scope.name, "different_msg_types" ) def test_simple_log_record_processor_custom_single_obj(self): """ Tests that special-case handling for logging a single non-string object is correctly applied. """ exporter = InMemoryLogRecordExporter() log_record_processor = BatchLogRecordProcessor(exporter) provider = LoggerProvider() provider.add_log_record_processor(log_record_processor) logger = logging.getLogger("single_obj") logger.addHandler(LoggingHandler(logger_provider=provider)) # NOTE: the behaviour of `record.getMessage` is detailed in the # `logging.Logger.debug` documentation: # > The msg is the message format string, and the args are the arguments # > which are merged into msg using the string formatting operator. [...] # > No % formatting operation is performed on msg when no args are supplied. 
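        # For instance, a record with msg="%s" and args=("x",) has getMessage()
        # return "x", while msg="%s" with no args is returned unchanged.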
# This test uses the presence of '%s' in the first arg to determine if # formatting was applied # string msg with no args - getMessage bypasses formatting and sets the string directly logger.warning("a string with a percent-s: %s") # string msg with args - getMessage formats args into the msg logger.warning("a string with a percent-s: %s", "and arg") # non-string msg with args - getMessage stringifies msg and formats args into it logger.warning(["a non-string with a percent-s", "%s"], "and arg") # non-string msg with no args: # - normally getMessage would stringify the object and bypass formatting # - SPECIAL CASE: bypass stringification as well to keep the raw object logger.warning(["a non-string with a percent-s", "%s"]) log_record_processor.shutdown() finished_logs = exporter.get_finished_logs() expected = [ ("a string with a percent-s: %s"), ("a string with a percent-s: and arg"), ("['a non-string with a percent-s', 'and arg']"), (["a non-string with a percent-s", "%s"]), ] for emitted, expected in zip(finished_logs, expected): self.assertEqual(emitted.log_record.body, expected) self.assertEqual(emitted.instrumentation_scope.name, "single_obj") def test_simple_log_record_processor_different_msg_types_with_formatter( self, ): exporter = InMemoryLogRecordExporter() log_record_processor = BatchLogRecordProcessor(exporter) provider = LoggerProvider() provider.add_log_record_processor(log_record_processor) logger = logging.getLogger("different_msg_types") handler = LoggingHandler(logger_provider=provider) handler.setFormatter( logging.Formatter("%(name)s - %(levelname)s - %(message)s") ) logger.addHandler(handler) logger.warning("warning message: %s", "possible upcoming heatwave") logger.error("Very high rise in temperatures across the globe") logger.critical("Temperature hits high 420 C in Hyderabad") logger.warning(["list", "of", "strings"]) logger.error({"key": "value"}) log_record_processor.shutdown() finished_logs = exporter.get_finished_logs() expected = [ ( "different_msg_types - WARNING - warning message: possible upcoming heatwave", "WARN", ), ( "different_msg_types - ERROR - Very high rise in temperatures across the globe", "ERROR", ), ( "different_msg_types - CRITICAL - Temperature hits high 420 C in Hyderabad", "CRITICAL", ), ( "different_msg_types - WARNING - ['list', 'of', 'strings']", "WARN", ), ("different_msg_types - ERROR - {'key': 'value'}", "ERROR"), ] emitted = [ (item.log_record.body, item.log_record.severity_text) for item in finished_logs ] self.assertEqual(expected, emitted) # Many more test cases for the BatchLogRecordProcessor exist under # opentelemetry-sdk/tests/shared_internal/test_batch_processor.py. # Important: make sure to call .shutdown() on the BatchLogRecordProcessor # before the end of the test, otherwise the worker thread will continue # to run after the end of the test. 
class TestBatchLogRecordProcessor(unittest.TestCase): def test_emit_call_log_record(self): exporter = InMemoryLogRecordExporter() log_record_processor = Mock(wraps=BatchLogRecordProcessor(exporter)) provider = LoggerProvider() provider.add_log_record_processor(log_record_processor) logger = logging.getLogger("emit_call") logger.propagate = False logger.addHandler(LoggingHandler(logger_provider=provider)) logger.error("error") self.assertEqual(log_record_processor.on_emit.call_count, 1) log_record_processor.shutdown() def test_with_multiple_threads(self): # pylint: disable=no-self-use exporter = InMemoryLogRecordExporter() batch_processor = BatchLogRecordProcessor( exporter, max_queue_size=3000, max_export_batch_size=50, schedule_delay_millis=30000, export_timeout_millis=500, ) def bulk_emit(num_emit): for _ in range(num_emit): batch_processor.on_emit(EMPTY_LOG) total_expected_logs = 0 with ThreadPoolExecutor(max_workers=69) as executor: for num_logs_to_emit in range(1, 70): executor.submit(bulk_emit, num_logs_to_emit) total_expected_logs += num_logs_to_emit executor.shutdown() batch_processor.shutdown() # Wait a bit for logs to flush. time.sleep(2) assert len(exporter.get_finished_logs()) == total_expected_logs @mark.skipif( version_info < (3, 10), reason="assertNoLogs only exists in python 3.10+.", ) def test_logging_lib_not_invoked_in_batch_log_record_emit(self): # pylint: disable=no-self-use # See https://github.com/open-telemetry/opentelemetry-python/issues/4261 exporter = Mock() processor = BatchLogRecordProcessor(exporter) logger_provider = LoggerProvider( resource=SDKResource.create( { "service.name": "shoppingcart", "service.instance.id": "instance-12", } ), ) logger_provider.add_log_record_processor(processor) handler = LoggingHandler( level=logging.INFO, logger_provider=logger_provider ) sdk_logger = logging.getLogger("opentelemetry.sdk") # Attach OTLP handler to SDK logger sdk_logger.addHandler(handler) # If `emit` calls logging.log then this test will throw a maximum recursion depth exceeded exception and fail. 
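        # assertNoLogs (available since Python 3.10, hence the skipif above)
        # fails the test if anything is logged on sdk_logger inside the block.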
try: with self.assertNoLogs(sdk_logger, logging.NOTSET): processor.on_emit(EMPTY_LOG) processor.shutdown() with self.assertNoLogs(sdk_logger, logging.NOTSET): processor.on_emit(EMPTY_LOG) finally: sdk_logger.removeHandler(handler) def test_args(self): exporter = InMemoryLogRecordExporter() log_record_processor = BatchLogRecordProcessor( exporter, max_queue_size=1024, schedule_delay_millis=2500, max_export_batch_size=256, export_timeout_millis=15000, ) self.assertEqual( log_record_processor._batch_processor._exporter, exporter ) self.assertEqual( log_record_processor._batch_processor._max_queue_size, 1024 ) self.assertEqual( log_record_processor._batch_processor._schedule_delay, 2.5 ) self.assertEqual( log_record_processor._batch_processor._max_export_batch_size, 256 ) self.assertEqual( log_record_processor._batch_processor._export_timeout_millis, 15000 ) log_record_processor.shutdown() @patch.dict( "os.environ", { OTEL_BLRP_MAX_QUEUE_SIZE: "1024", OTEL_BLRP_SCHEDULE_DELAY: "2500", OTEL_BLRP_MAX_EXPORT_BATCH_SIZE: "256", OTEL_BLRP_EXPORT_TIMEOUT: "15000", }, ) def test_env_vars(self): exporter = InMemoryLogRecordExporter() log_record_processor = BatchLogRecordProcessor(exporter) self.assertEqual( log_record_processor._batch_processor._exporter, exporter ) self.assertEqual( log_record_processor._batch_processor._max_queue_size, 1024 ) self.assertEqual( log_record_processor._batch_processor._schedule_delay, 2.5 ) self.assertEqual( log_record_processor._batch_processor._max_export_batch_size, 256 ) self.assertEqual( log_record_processor._batch_processor._export_timeout_millis, 15000 ) log_record_processor.shutdown() def test_args_defaults(self): exporter = InMemoryLogRecordExporter() log_record_processor = BatchLogRecordProcessor(exporter) self.assertEqual( log_record_processor._batch_processor._exporter, exporter ) self.assertEqual( log_record_processor._batch_processor._max_queue_size, 2048 ) self.assertEqual( log_record_processor._batch_processor._schedule_delay, 5 ) self.assertEqual( log_record_processor._batch_processor._max_export_batch_size, 512 ) self.assertEqual( log_record_processor._batch_processor._export_timeout_millis, 30000 ) log_record_processor.shutdown() @patch.dict( "os.environ", { OTEL_BLRP_MAX_QUEUE_SIZE: "a", OTEL_BLRP_SCHEDULE_DELAY: " ", OTEL_BLRP_MAX_EXPORT_BATCH_SIZE: "One", OTEL_BLRP_EXPORT_TIMEOUT: "@", }, ) def test_args_env_var_value_error(self): exporter = InMemoryLogRecordExporter() _logger.disabled = True log_record_processor = BatchLogRecordProcessor(exporter) _logger.disabled = False self.assertEqual( log_record_processor._batch_processor._exporter, exporter ) self.assertEqual( log_record_processor._batch_processor._max_queue_size, 2048 ) self.assertEqual( log_record_processor._batch_processor._schedule_delay, 5 ) self.assertEqual( log_record_processor._batch_processor._max_export_batch_size, 512 ) self.assertEqual( log_record_processor._batch_processor._export_timeout_millis, 30000 ) log_record_processor.shutdown() def test_args_none_defaults(self): exporter = InMemoryLogRecordExporter() log_record_processor = BatchLogRecordProcessor( exporter, max_queue_size=None, schedule_delay_millis=None, max_export_batch_size=None, export_timeout_millis=None, ) self.assertEqual( log_record_processor._batch_processor._exporter, exporter ) self.assertEqual( log_record_processor._batch_processor._max_queue_size, 2048 ) self.assertEqual( log_record_processor._batch_processor._schedule_delay, 5 ) self.assertEqual( log_record_processor._batch_processor._max_export_batch_size, 
512 ) self.assertEqual( log_record_processor._batch_processor._export_timeout_millis, 30000 ) log_record_processor.shutdown() def test_validation_negative_max_queue_size(self): exporter = InMemoryLogRecordExporter() self.assertRaises( ValueError, BatchLogRecordProcessor, exporter, max_queue_size=0, ) self.assertRaises( ValueError, BatchLogRecordProcessor, exporter, max_queue_size=-1, ) self.assertRaises( ValueError, BatchLogRecordProcessor, exporter, schedule_delay_millis=0, ) self.assertRaises( ValueError, BatchLogRecordProcessor, exporter, schedule_delay_millis=-1, ) self.assertRaises( ValueError, BatchLogRecordProcessor, exporter, max_export_batch_size=0, ) self.assertRaises( ValueError, BatchLogRecordProcessor, exporter, max_export_batch_size=-1, ) self.assertRaises( ValueError, BatchLogRecordProcessor, exporter, max_queue_size=100, max_export_batch_size=101, ) class TestConsoleLogExporter(unittest.TestCase): def test_export(self): # pylint: disable=no-self-use """Check that the console exporter prints log records.""" ctx = set_span_in_context( NonRecordingSpan( SpanContext( 2604504634922341076776623263868986797, 5213367945872657620, False, TraceFlags(0x01), ) ) ) log_record = ReadableLogRecord( LogRecord( timestamp=int(time.time() * 1e9), context=ctx, severity_text="WARN", severity_number=SeverityNumber.WARN, body="Zhengzhou, We have a heaviest rains in 1000 years", attributes={"a": 1, "b": "c"}, ), resource=SDKResource({"key": "value"}), instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), ) exporter = ConsoleLogRecordExporter() # Mocking stdout interferes with debugging and test reporting, mock on # the exporter instance instead. with patch.object(exporter, "out") as mock_stdout: exporter.export([log_record]) mock_stdout.write.assert_called_once_with( log_record.to_json() + os.linesep ) self.assertEqual(mock_stdout.write.call_count, 1) self.assertEqual(mock_stdout.flush.call_count, 1) def test_export_custom(self): # pylint: disable=no-self-use """Check that console exporter uses custom io, formatter.""" mock_record_str = Mock(str) def formatter(record): # pylint: disable=unused-argument return mock_record_str mock_stdout = Mock() exporter = ConsoleLogRecordExporter( out=mock_stdout, formatter=formatter ) exporter.export([EMPTY_LOG]) mock_stdout.write.assert_called_once_with(mock_record_str) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/logs/test_handler.py000066400000000000000000000477261511654350100266500ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging import os import unittest from unittest.mock import Mock, patch from opentelemetry._logs import NoOpLoggerProvider, SeverityNumber from opentelemetry._logs import get_logger as APIGetLogger from opentelemetry.attributes import BoundedAttributes from opentelemetry.sdk import trace from opentelemetry.sdk._logs import ( LoggerProvider, LoggingHandler, LogRecordProcessor, ReadableLogRecord, ) from opentelemetry.sdk.environment_variables import OTEL_ATTRIBUTE_COUNT_LIMIT from opentelemetry.semconv._incubating.attributes import code_attributes from opentelemetry.semconv.attributes import exception_attributes from opentelemetry.trace import ( INVALID_SPAN_CONTEXT, set_span_in_context, ) # pylint: disable=too-many-public-methods class TestLoggingHandler(unittest.TestCase): def test_handler_default_log_level(self): processor, logger = set_up_test_logging(logging.NOTSET) # Make sure debug messages are ignored by default logger.debug("Debug message") assert processor.emit_count() == 0 # Assert emit gets called for warning message with self.assertLogs(level=logging.WARNING): logger.warning("Warning message") self.assertEqual(processor.emit_count(), 1) def test_handler_custom_log_level(self): processor, logger = set_up_test_logging(logging.ERROR) with self.assertLogs(level=logging.WARNING): logger.warning("Warning message test custom log level") # Make sure any log with level < ERROR is ignored assert processor.emit_count() == 0 with self.assertLogs(level=logging.ERROR): logger.error("Mumbai, we have a major problem") with self.assertLogs(level=logging.CRITICAL): logger.critical("No Time For Caution") self.assertEqual(processor.emit_count(), 2) # pylint: disable=protected-access def test_log_record_emit_noop(self): noop_logger_provider = NoOpLoggerProvider() logger_mock = APIGetLogger( __name__, logger_provider=noop_logger_provider ) logger = logging.getLogger(__name__) handler_mock = Mock(spec=LoggingHandler) handler_mock._logger = logger_mock handler_mock.level = logging.WARNING logger.addHandler(handler_mock) with self.assertLogs(level=logging.WARNING): logger.warning("Warning message") def test_log_flush_noop(self): no_op_logger_provider = NoOpLoggerProvider() no_op_logger_provider.force_flush = Mock() logger = logging.getLogger("foo") handler = LoggingHandler( level=logging.NOTSET, logger_provider=no_op_logger_provider ) logger.addHandler(handler) with self.assertLogs(level=logging.WARNING): logger.warning("Warning message") logger.handlers[0].flush() no_op_logger_provider.force_flush.assert_not_called() def test_log_record_no_span_context(self): processor, logger = set_up_test_logging(logging.WARNING) # Assert emit gets called for warning message with self.assertLogs(level=logging.WARNING): logger.warning("Warning message") record = processor.get_log_record(0) self.assertIsNotNone(record) self.assertEqual( record.log_record.trace_id, INVALID_SPAN_CONTEXT.trace_id ) self.assertEqual( record.log_record.span_id, INVALID_SPAN_CONTEXT.span_id ) self.assertEqual( record.log_record.trace_flags, INVALID_SPAN_CONTEXT.trace_flags, ) def test_log_record_observed_timestamp(self): processor, logger = set_up_test_logging(logging.WARNING) with self.assertLogs(level=logging.WARNING): logger.warning("Warning message") record = processor.get_log_record(0) self.assertIsNotNone(record.log_record.observed_timestamp) def test_log_record_user_attributes(self): """Attributes can be injected into logs by adding them to the ReadWriteLogRecord""" processor, logger = set_up_test_logging(logging.WARNING) #
Assert emit gets called for warning message with self.assertLogs(level=logging.WARNING): logger.warning("Warning message", extra={"http.status_code": 200}) record = processor.get_log_record(0) self.assertIsNotNone(record) self.assertEqual(len(record.log_record.attributes), 4) self.assertEqual(record.log_record.attributes["http.status_code"], 200) self.assertTrue( record.log_record.attributes[ code_attributes.CODE_FILE_PATH ].endswith("test_handler.py") ) self.assertEqual( record.log_record.attributes[code_attributes.CODE_FUNCTION_NAME], "test_log_record_user_attributes", ) # The line number of the log statement is not a constant (editing the test can change it), # so only check that the attribute is present. self.assertTrue( code_attributes.CODE_LINE_NUMBER in record.log_record.attributes ) self.assertTrue( isinstance(record.log_record.attributes, BoundedAttributes) ) def test_log_record_exception(self): """Exception information will be included in attributes""" processor, logger = set_up_test_logging(logging.ERROR) try: raise ZeroDivisionError("division by zero") except ZeroDivisionError: with self.assertLogs(level=logging.ERROR): logger.exception("Zero Division Error") record = processor.get_log_record(0) self.assertIsNotNone(record) self.assertTrue(isinstance(record.log_record.body, str)) self.assertEqual(record.log_record.body, "Zero Division Error") self.assertEqual( record.log_record.attributes[exception_attributes.EXCEPTION_TYPE], ZeroDivisionError.__name__, ) self.assertEqual( record.log_record.attributes[ exception_attributes.EXCEPTION_MESSAGE ], "division by zero", ) stack_trace = record.log_record.attributes[ exception_attributes.EXCEPTION_STACKTRACE ] self.assertIsInstance(stack_trace, str) self.assertTrue("Traceback" in stack_trace) self.assertTrue("ZeroDivisionError" in stack_trace) self.assertTrue("division by zero" in stack_trace) self.assertTrue(__file__ in stack_trace) def test_log_record_recursive_exception(self): """Exception information will be included in attributes even when the exception is recursive""" processor, logger = set_up_test_logging(logging.ERROR) try: raise ZeroDivisionError( ZeroDivisionError(ZeroDivisionError("division by zero")) ) except ZeroDivisionError: with self.assertLogs(level=logging.ERROR): logger.exception("Zero Division Error") record = processor.get_log_record(0) self.assertIsNotNone(record) self.assertEqual(record.log_record.body, "Zero Division Error") self.assertEqual( record.log_record.attributes[exception_attributes.EXCEPTION_TYPE], ZeroDivisionError.__name__, ) self.assertEqual( record.log_record.attributes[ exception_attributes.EXCEPTION_MESSAGE ], "division by zero", ) stack_trace = record.log_record.attributes[ exception_attributes.EXCEPTION_STACKTRACE ] self.assertIsInstance(stack_trace, str) self.assertTrue("Traceback" in stack_trace) self.assertTrue("ZeroDivisionError" in stack_trace) self.assertTrue("division by zero" in stack_trace) self.assertTrue(__file__ in stack_trace) def test_log_exc_info_false(self): """Exception information will not be included in attributes""" processor, logger = set_up_test_logging(logging.NOTSET) try: raise ZeroDivisionError("division by zero") except ZeroDivisionError: with self.assertLogs(level=logging.ERROR): logger.error("Zero Division Error", exc_info=False) record = processor.get_log_record(0) self.assertIsNotNone(record) self.assertEqual(record.log_record.body, "Zero Division Error") self.assertNotIn( exception_attributes.EXCEPTION_TYPE, record.log_record.attributes, ) self.assertNotIn( 
exception_attributes.EXCEPTION_MESSAGE, record.log_record.attributes, ) self.assertNotIn( exception_attributes.EXCEPTION_STACKTRACE, record.log_record.attributes, ) def test_log_record_exception_with_object_payload(self): processor, logger = set_up_test_logging(logging.ERROR) class CustomException(Exception): def __str__(self): return "CustomException stringified" try: raise CustomException("CustomException message") except CustomException as exception: with self.assertLogs(level=logging.ERROR): logger.exception(exception) record = processor.get_log_record(0) self.assertIsNotNone(record) self.assertTrue(isinstance(record.log_record.body, str)) self.assertEqual(record.log_record.body, "CustomException stringified") self.assertEqual( record.log_record.attributes[exception_attributes.EXCEPTION_TYPE], CustomException.__name__, ) self.assertEqual( record.log_record.attributes[ exception_attributes.EXCEPTION_MESSAGE ], "CustomException message", ) stack_trace = record.log_record.attributes[ exception_attributes.EXCEPTION_STACKTRACE ] self.assertIsInstance(stack_trace, str) self.assertTrue("Traceback" in stack_trace) self.assertTrue("CustomException" in stack_trace) self.assertTrue(__file__ in stack_trace) def test_log_record_trace_correlation(self): processor, logger = set_up_test_logging(logging.WARNING) tracer = trace.TracerProvider().get_tracer(__name__) with tracer.start_as_current_span("test") as span: mock_context = set_span_in_context(span) with patch( "opentelemetry.sdk._logs._internal.get_current", return_value=mock_context, ): with self.assertLogs(level=logging.CRITICAL): logger.critical("Critical message within span") record = processor.get_log_record(0) self.assertEqual( record.log_record.body, "Critical message within span", ) self.assertEqual(record.log_record.severity_text, "CRITICAL") self.assertEqual( record.log_record.severity_number, SeverityNumber.FATAL, ) self.assertEqual(record.log_record.context, mock_context) span_context = span.get_span_context() self.assertEqual( record.log_record.trace_id, span_context.trace_id ) self.assertEqual( record.log_record.span_id, span_context.span_id ) self.assertEqual( record.log_record.trace_flags, span_context.trace_flags, ) def test_log_record_trace_correlation_deprecated(self): processor, logger = set_up_test_logging(logging.WARNING) tracer = trace.TracerProvider().get_tracer(__name__) with tracer.start_as_current_span("test") as span: with self.assertLogs(level=logging.CRITICAL): logger.critical("Critical message within span") record = processor.get_log_record(0) self.assertEqual( record.log_record.body, "Critical message within span" ) self.assertEqual(record.log_record.severity_text, "CRITICAL") self.assertEqual( record.log_record.severity_number, SeverityNumber.FATAL ) span_context = span.get_span_context() self.assertEqual(record.log_record.trace_id, span_context.trace_id) self.assertEqual(record.log_record.span_id, span_context.span_id) self.assertEqual( record.log_record.trace_flags, span_context.trace_flags ) def test_warning_without_formatter(self): processor, logger = set_up_test_logging(logging.WARNING) logger.warning("Test message") record = processor.get_log_record(0) self.assertEqual(record.log_record.body, "Test message") def test_exception_without_formatter(self): processor, logger = set_up_test_logging(logging.WARNING) logger.exception("Test exception") record = processor.get_log_record(0) self.assertEqual(record.log_record.body, "Test exception") def test_warning_with_formatter(self): processor, logger = 
set_up_test_logging( logging.WARNING, formatter=logging.Formatter( "%(name)s - %(levelname)s - %(message)s" ), ) logger.warning("Test message") record = processor.get_log_record(0) self.assertEqual( record.log_record.body, "foo - WARNING - Test message" ) def test_log_body_is_always_string_with_formatter(self): processor, logger = set_up_test_logging( logging.WARNING, formatter=logging.Formatter( "%(name)s - %(levelname)s - %(message)s" ), ) logger.warning(["something", "of", "note"]) record = processor.get_log_record(0) self.assertIsInstance(record.log_record.body, str) @patch.dict(os.environ, {"OTEL_SDK_DISABLED": "true"}) def test_handler_root_logger_with_disabled_sdk_does_not_go_into_recursion_error( self, ): processor, logger = set_up_test_logging( logging.NOTSET, root_logger=True ) logger.warning("hello") self.assertEqual(processor.emit_count(), 0) @patch.dict(os.environ, {OTEL_ATTRIBUTE_COUNT_LIMIT: "3"}) def test_otel_attribute_count_limit_respected_in_logging_handler(self): """Test that OTEL_ATTRIBUTE_COUNT_LIMIT is properly respected by LoggingHandler.""" # Create a new LoggerProvider within the patched environment # This will create LogRecordLimits() that reads from the environment variable logger_provider = LoggerProvider() processor = FakeProcessor() logger_provider.add_log_record_processor(processor) logger = logging.getLogger("env_test") handler = LoggingHandler( level=logging.WARNING, logger_provider=logger_provider ) logger.addHandler(handler) # Create a log record with many extra attributes extra_attrs = {f"custom_attr_{i}": f"value_{i}" for i in range(10)} with self.assertLogs(level=logging.WARNING): logger.warning( "Test message with many attributes", extra=extra_attrs ) record = processor.get_log_record(0) # With OTEL_ATTRIBUTE_COUNT_LIMIT=3, should have exactly 3 attributes total_attrs = len(record.log_record.attributes) self.assertEqual( total_attrs, 3, f"Should have exactly 3 attributes due to limit, got {total_attrs}", ) # Should have 10 dropped attributes (10 custom + 3 code - 3 kept = 10 dropped) self.assertEqual( record.dropped_attributes, 10, f"Should have 10 dropped attributes, got {record.dropped_attributes}", ) @patch.dict(os.environ, {OTEL_ATTRIBUTE_COUNT_LIMIT: "5"}) def test_otel_attribute_count_limit_includes_code_attributes(self): """Test that OTEL_ATTRIBUTE_COUNT_LIMIT applies to all attributes including code attributes.""" # Create a new LoggerProvider within the patched environment # This will create LogRecordLimits() that reads from the environment variable logger_provider = LoggerProvider() processor = FakeProcessor() logger_provider.add_log_record_processor(processor) logger = logging.getLogger("env_test_2") handler = LoggingHandler( level=logging.WARNING, logger_provider=logger_provider ) logger.addHandler(handler) # Create a log record with some extra attributes extra_attrs = {f"user_attr_{i}": f"value_{i}" for i in range(8)} with self.assertLogs(level=logging.WARNING): logger.warning("Test message", extra=extra_attrs) record = processor.get_log_record(0) # With OTEL_ATTRIBUTE_COUNT_LIMIT=5, should have exactly 5 attributes total_attrs = len(record.log_record.attributes) self.assertEqual( total_attrs, 5, f"Should have exactly 5 attributes due to limit, got {total_attrs}", ) # Should have 6 dropped attributes (8 user + 3 code - 5 kept = 6 dropped) self.assertEqual( record.dropped_attributes, 6, f"Should have 6 dropped attributes, got {record.dropped_attributes}", ) def test_logging_handler_without_env_var_uses_default_limit(self): """Test that 
without OTEL_ATTRIBUTE_COUNT_LIMIT, default limit (128) should apply.""" processor, logger = set_up_test_logging(logging.WARNING) # Create a log record with many attributes (more than default limit of 128) extra_attrs = {f"attr_{i}": f"value_{i}" for i in range(150)} with self.assertLogs(level=logging.WARNING): logger.warning( "Test message with many attributes", extra=extra_attrs ) record = processor.get_log_record(0) # Should be limited to default limit (128) total attributes total_attrs = len(record.log_record.attributes) self.assertEqual( total_attrs, 128, f"Should have exactly 128 attributes (default limit), got {total_attrs}", ) # Should have 25 dropped attributes (150 user + 3 code - 128 kept = 25 dropped) self.assertEqual( record.dropped_attributes, 25, f"Should have 25 dropped attributes, got {record.dropped_attributes}", ) def set_up_test_logging(level, formatter=None, root_logger=False): logger_provider = LoggerProvider() processor = FakeProcessor() logger_provider.add_log_record_processor(processor) logger = logging.getLogger(None if root_logger else "foo") handler = LoggingHandler(level=level, logger_provider=logger_provider) if formatter: handler.setFormatter(formatter) logger.addHandler(handler) return processor, logger class FakeProcessor(LogRecordProcessor): def __init__(self): self.log_data_emitted = [] def on_emit(self, log_record: ReadableLogRecord): self.log_data_emitted.append(log_record) def shutdown(self): pass def force_flush(self, timeout_millis: int = 30000): pass def emit_count(self): return len(self.log_data_emitted) def get_log_record(self, i): return self.log_data_emitted[i] python-opentelemetry-1.39.1/opentelemetry-sdk/tests/logs/test_log_limits.py000066400000000000000000000047271511654350100273670ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
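# Illustrative sketch (not executed by this suite): LogRecordLimits can be
# configured explicitly, or left to read OTEL_ATTRIBUTE_COUNT_LIMIT and
# OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT from the environment:
#
#     limits = LogRecordLimits(max_attributes=2, max_attribute_length=5)
#     # attribute maps larger than max_attributes drop the surplus entries,
#     # and string values longer than max_attribute_length are truncated.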
import unittest from unittest.mock import patch from opentelemetry.sdk._logs import LogRecordLimits from opentelemetry.sdk._logs._internal import ( _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT, ) from opentelemetry.sdk.environment_variables import ( OTEL_ATTRIBUTE_COUNT_LIMIT, OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, ) class TestLogLimits(unittest.TestCase): def test_log_limits_repr_unset(self): expected = f"LogRecordLimits(max_attributes={_DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT}, max_attribute_length=None)" limits = str(LogRecordLimits()) self.assertEqual(expected, limits) def test_log_limits_max_attributes(self): expected = 1 limits = LogRecordLimits(max_attributes=1) self.assertEqual(expected, limits.max_attributes) def test_log_limits_max_attribute_length(self): expected = 1 limits = LogRecordLimits(max_attribute_length=1) self.assertEqual(expected, limits.max_attribute_length) def test_invalid_env_vars_raise(self): env_vars = [ OTEL_ATTRIBUTE_COUNT_LIMIT, OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, ] bad_values = ["bad", "-1"] test_cases = { env_var: bad_value for env_var in env_vars for bad_value in bad_values } for env_var, bad_value in test_cases.items(): with self.subTest(f"Testing {env_var}={bad_value}"): with self.assertRaises(ValueError) as error, patch.dict( "os.environ", {env_var: bad_value}, clear=True ): LogRecordLimits() expected_msg = f"{env_var} must be a non-negative integer but got {bad_value}" self.assertEqual( expected_msg, str(error.exception), f"Unexpected error message for {env_var}={bad_value}", ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/logs/test_log_record.py000066400000000000000000000205011511654350100273300ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
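# Illustrative sketch (not executed by this suite) of the wrapping pattern
# exercised below: an API LogRecord is wrapped in a ReadWriteLogRecord, which
# applies LogRecordLimits and tracks how many attributes were dropped:
#
#     record = ReadWriteLogRecord(
#         LogRecord(timestamp=0, body="a log line", attributes={"k": "v"}),
#         limits=LogRecordLimits(max_attributes=1),
#     )
#     record.dropped_attributes     # number of attributes removed by limits
#     record.log_record.attributes  # BoundedAttributes honoring the limits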
import json import unittest import warnings from opentelemetry._logs import LogRecord, SeverityNumber from opentelemetry.attributes import BoundedAttributes from opentelemetry.context import get_current from opentelemetry.sdk._logs import ( LogRecordDroppedAttributesWarning, LogRecordLimits, ReadableLogRecord, ReadWriteLogRecord, ) from opentelemetry.sdk.resources import Resource from opentelemetry.trace.span import TraceFlags class TestLogRecord(unittest.TestCase): def test_log_record_to_json(self): log_record = ReadableLogRecord( LogRecord( timestamp=0, observed_timestamp=0, body={"key": "logLine", "bytes": b"123"}, attributes={ "mapping": {"key": "value"}, "none": None, "sequence": [1, 2], "str": "string", }, event_name="a.event", ), resource=Resource({"service.name": "foo"}), ) self.assertEqual( log_record.to_json(indent=None), '{"body": {"key": "logLine", "bytes": "MTIz"}, "severity_number": null, "severity_text": null, "attributes": {"mapping": {"key": "value"}, "none": null, "sequence": [1, 2], "str": "string"}, "dropped_attributes": 0, "timestamp": "1970-01-01T00:00:00.000000Z", "observed_timestamp": "1970-01-01T00:00:00.000000Z", "trace_id": "0x00000000000000000000000000000000", "span_id": "0x0000000000000000", "trace_flags": 0, "resource": {"attributes": {"service.name": "foo"}, "schema_url": ""}, "event_name": "a.event"}', ) def test_log_record_to_json_serializes_severity_number_as_int(self): actual = ReadableLogRecord( LogRecord( timestamp=0, severity_number=SeverityNumber.WARN, observed_timestamp=0, body="a log line", ), resource=Resource({"service.name": "foo"}), ) decoded = json.loads(actual.to_json()) self.assertEqual(SeverityNumber.WARN.value, decoded["severity_number"]) def test_log_record_to_json_serializes_null_severity_number(self): actual = ReadableLogRecord( LogRecord( observed_timestamp=0, body="a log line", ), resource=Resource({"service.name": "foo"}), ) decoded = json.loads(actual.to_json()) self.assertEqual(None, decoded["severity_number"]) def test_log_record_bounded_attributes(self): attr = {"key": "value"} result = ReadWriteLogRecord( LogRecord(timestamp=0, body="a log line", attributes=attr) ) self.assertTrue( isinstance(result.log_record.attributes, BoundedAttributes) ) def test_log_record_dropped_attributes_empty_limits(self): attr = {"key": "value"} result = ReadWriteLogRecord( LogRecord(timestamp=0, body="a log line", attributes=attr) ) self.assertTrue(result.dropped_attributes == 0) def test_log_record_dropped_attributes_set_limits_max_attribute(self): attr = {"key": "value", "key2": "value2"} limits = LogRecordLimits( max_attributes=1, ) result = ReadWriteLogRecord( LogRecord(timestamp=0, body="a log line", attributes=attr), limits=limits, ) self.assertTrue(result.dropped_attributes == 1) def test_log_record_dropped_attributes_set_limits_max_attribute_length( self, ): attr = {"key": "value", "key2": "value2"} expected = {"key": "v", "key2": "v"} limits = LogRecordLimits( max_attribute_length=1, ) result = ReadWriteLogRecord( LogRecord( timestamp=0, body="a log line", attributes=attr, ), limits=limits, ) self.assertTrue(result.dropped_attributes == 0) self.assertEqual(expected, result.log_record.attributes) def test_log_record_dropped_attributes_set_limits(self): attr = {"key": "value", "key2": "value2"} expected = {"key2": "v"} limits = LogRecordLimits( max_attributes=1, max_attribute_length=1, ) result = ReadWriteLogRecord( LogRecord( timestamp=0, body="a log line", attributes=attr, ), limits=limits, ) self.assertTrue(result.dropped_attributes == 1) 
self.assertEqual(expected, result.log_record.attributes) def test_log_record_dropped_attributes_set_limits_warning_once(self): attr = {"key1": "value1", "key2": "value2"} limits = LogRecordLimits( max_attributes=1, max_attribute_length=1, ) with warnings.catch_warnings(record=True) as cw: for _ in range(10): ReadWriteLogRecord( LogRecord( timestamp=0, body="a log line", attributes=attr, ), limits=limits, ) # Check that exactly one LogRecordDroppedAttributesWarning was emitted dropped_attributes_warnings = [ w for w in cw if isinstance(w.message, LogRecordDroppedAttributesWarning) ] self.assertEqual( len(dropped_attributes_warnings), 1, "Expected exactly one LogRecordDroppedAttributesWarning due to simplefilter('once')", ) # Check the message content of the LogRecordDroppedAttributesWarning warning_message = str(dropped_attributes_warnings[0].message) self.assertIn( "Log record attributes were dropped due to limits", warning_message, ) def test_log_record_dropped_attributes_unset_limits(self): attr = {"key": "value", "key2": "value2"} limits = LogRecordLimits() result = ReadWriteLogRecord( LogRecord( timestamp=0, body="a log line", attributes=attr, ), limits=limits, ) self.assertTrue(result.dropped_attributes == 0) self.assertEqual(attr, result.log_record.attributes) # pylint:disable=protected-access def test_log_record_from_api_log_record(self): api_log_record = LogRecord( timestamp=1, observed_timestamp=2, context=get_current(), severity_text="WARN", severity_number=SeverityNumber.WARN, body="a log line", attributes={"a": "b"}, event_name="an.event", ) resource = Resource.create({}) record = ReadWriteLogRecord._from_api_log_record( record=api_log_record, resource=resource ) self.assertEqual(record.log_record.timestamp, 1) self.assertEqual(record.log_record.observed_timestamp, 2) self.assertEqual(record.log_record.context, get_current()) # trace_id, span_id, and trace_flags come from the context's span self.assertEqual(record.log_record.trace_id, 0) self.assertEqual(record.log_record.span_id, 0) self.assertEqual(record.log_record.trace_flags, TraceFlags(0x00)) self.assertEqual(record.log_record.severity_text, "WARN") self.assertEqual( record.log_record.severity_number, SeverityNumber.WARN ) self.assertEqual(record.log_record.body, "a log line") self.assertEqual(record.log_record.attributes, {"a": "b"}) self.assertEqual(record.log_record.event_name, "an.event") self.assertEqual(record.resource, resource) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/logs/test_logger_provider_cache.py000066400000000000000000000067661511654350100315440ustar00rootroot00000000000000import logging import unittest from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler from opentelemetry.sdk._logs.export import ( InMemoryLogRecordExporter, SimpleLogRecordProcessor, ) def set_up_logging_handler(level): logger_provider = LoggerProvider() exporter = InMemoryLogRecordExporter() processor = SimpleLogRecordProcessor(exporter=exporter) logger_provider.add_log_record_processor(processor) handler = LoggingHandler(level=level, logger_provider=logger_provider) return handler, logger_provider def create_logger(handler, name): logger = logging.getLogger(name) logger.addHandler(handler) return logger class TestLoggerProviderCache(unittest.TestCase): def test_get_logger_single_handler(self): handler, logger_provider = set_up_logging_handler(level=logging.DEBUG) # pylint: disable=protected-access logger_cache = logger_provider._logger_cache logger = create_logger(handler, "test_logger") # Ensure logger is 
lazily cached self.assertEqual(0, len(logger_cache)) with self.assertLogs(level=logging.WARNING): logger.warning("test message") self.assertEqual(1, len(logger_cache)) # Ensure only one logger is cached with self.assertLogs(level=logging.WARNING): rounds = 100 for _ in range(rounds): logger.warning("test message") self.assertEqual(1, len(logger_cache)) def test_get_logger_multiple_loggers(self): handler, logger_provider = set_up_logging_handler(level=logging.DEBUG) # pylint: disable=protected-access logger_cache = logger_provider._logger_cache num_loggers = 10 loggers = [create_logger(handler, str(i)) for i in range(num_loggers)] # Ensure loggers are lazily cached self.assertEqual(0, len(logger_cache)) with self.assertLogs(level=logging.WARNING): for logger in loggers: logger.warning("test message") self.assertEqual(num_loggers, len(logger_cache)) with self.assertLogs(level=logging.WARNING): rounds = 100 for _ in range(rounds): for logger in loggers: logger.warning("test message") self.assertEqual(num_loggers, len(logger_cache)) def test_provider_get_logger_no_cache(self): _, logger_provider = set_up_logging_handler(level=logging.DEBUG) # pylint: disable=protected-access logger_cache = logger_provider._logger_cache logger_provider.get_logger( name="test_logger", version="version", schema_url="schema_url", attributes={"key": "value"}, ) # Ensure logger is not cached if attributes is set self.assertEqual(0, len(logger_cache)) def test_provider_get_logger_cached(self): _, logger_provider = set_up_logging_handler(level=logging.DEBUG) # pylint: disable=protected-access logger_cache = logger_provider._logger_cache logger_provider.get_logger( name="test_logger", version="version", schema_url="schema_url", ) # Ensure only one logger is cached self.assertEqual(1, len(logger_cache)) logger_provider.get_logger( name="test_logger", version="version", schema_url="schema_url", ) # Ensure only one logger is cached self.assertEqual(1, len(logger_cache)) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/logs/test_logs.py000066400000000000000000000176571511654350100261770ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=protected-access import unittest from unittest.mock import Mock, patch from opentelemetry._logs import LogRecord, SeverityNumber from opentelemetry.context import get_current from opentelemetry.sdk._logs import ( Logger, LoggerProvider, ReadableLogRecord, ) from opentelemetry.sdk._logs._internal import ( NoOpLogger, SynchronousMultiLogRecordProcessor, ) from opentelemetry.sdk.environment_variables import OTEL_SDK_DISABLED from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.util.instrumentation import InstrumentationScope class TestLoggerProvider(unittest.TestCase): def test_resource(self): """ `LoggerProvider` provides a way to allow a `Resource` to be specified. 
""" logger_provider_0 = LoggerProvider() logger_provider_1 = LoggerProvider() self.assertEqual( logger_provider_0.resource, logger_provider_1.resource, ) self.assertIsInstance(logger_provider_0.resource, Resource) self.assertIsInstance(logger_provider_1.resource, Resource) resource = Resource({"key": "value"}) self.assertIs(LoggerProvider(resource=resource).resource, resource) def test_get_logger(self): """ `LoggerProvider.get_logger` arguments are used to create an `InstrumentationScope` object on the created `Logger`. """ logger = LoggerProvider().get_logger( "name", version="version", schema_url="schema_url", attributes={"key": "value"}, ) self.assertEqual(logger._instrumentation_scope.name, "name") self.assertEqual(logger._instrumentation_scope.version, "version") self.assertEqual( logger._instrumentation_scope.schema_url, "schema_url" ) self.assertEqual( logger._instrumentation_scope.attributes, {"key": "value"} ) @patch.dict("os.environ", {OTEL_SDK_DISABLED: "true"}) def test_get_logger_with_sdk_disabled(self): logger = LoggerProvider().get_logger(Mock()) self.assertIsInstance(logger, NoOpLogger) @patch.object(Resource, "create") def test_logger_provider_init(self, resource_patch): logger_provider = LoggerProvider() resource_patch.assert_called_once() self.assertIsNotNone(logger_provider._resource) self.assertTrue( isinstance( logger_provider._multi_log_record_processor, SynchronousMultiLogRecordProcessor, ) ) self.assertIsNotNone(logger_provider._at_exit_handler) class TestReadableLogRecord(unittest.TestCase): def setUp(self): self.log_record = LogRecord( timestamp=1234567890, observed_timestamp=1234567891, body="Test log message", attributes={"key": "value"}, severity_number=SeverityNumber.INFO, severity_text="INFO", ) self.resource = Resource({"service.name": "test-service"}) self.readable_log_record = ReadableLogRecord( log_record=self.log_record, resource=self.resource, instrumentation_scope=None, ) def test_readable_log_record_is_frozen(self): """Test that ReadableLogRecord is frozen and cannot be modified.""" with self.assertRaises((AttributeError, TypeError)): self.readable_log_record.log_record = LogRecord( timestamp=999, body="Modified" ) def test_readable_log_record_can_read_attributes(self): """Test that ReadableLogRecord provides read access to all fields.""" self.assertEqual( self.readable_log_record.log_record.timestamp, 1234567890 ) self.assertEqual( self.readable_log_record.log_record.body, "Test log message" ) self.assertEqual( self.readable_log_record.log_record.attributes["key"], "value" ) self.assertEqual( self.readable_log_record.resource.attributes["service.name"], "test-service", ) class TestLogger(unittest.TestCase): @staticmethod def _get_logger(): log_record_processor_mock = Mock() logger = Logger( resource=Resource.create({}), multi_log_record_processor=log_record_processor_mock, instrumentation_scope=InstrumentationScope( "name", "version", "schema_url", {"an": "attribute"}, ), ) return logger, log_record_processor_mock def test_can_emit_logrecord(self): logger, log_record_processor_mock = self._get_logger() log_record = LogRecord( observed_timestamp=0, body="a log line", ) logger.emit(log_record) log_record_processor_mock.on_emit.assert_called_once() log_data = log_record_processor_mock.on_emit.call_args.args[0] self.assertTrue(isinstance(log_data.log_record, LogRecord)) self.assertTrue(log_data.log_record is log_record) def test_can_emit_api_logrecord(self): logger, log_record_processor_mock = self._get_logger() api_log_record = LogRecord( 
observed_timestamp=0, body="a log line", ) logger.emit(api_log_record) log_record_processor_mock.on_emit.assert_called_once() log_data = log_record_processor_mock.on_emit.call_args.args[0] log_record = log_data.log_record self.assertTrue(isinstance(log_record, LogRecord)) self.assertEqual(log_record.timestamp, None) self.assertEqual(log_record.observed_timestamp, 0) self.assertIsNotNone(log_record.context) self.assertEqual(log_record.severity_number, None) self.assertEqual(log_record.severity_text, None) self.assertEqual(log_record.body, "a log line") self.assertEqual(log_record.attributes, {}) self.assertEqual(log_record.event_name, None) self.assertEqual(log_data.resource, logger.resource) def test_can_emit_with_keywords_arguments(self): logger, log_record_processor_mock = self._get_logger() log_record = LogRecord( timestamp=100, observed_timestamp=101, context=get_current(), severity_number=SeverityNumber.WARN, severity_text="warn", body="a body", attributes={"some": "attributes"}, event_name="event_name", ) logger.emit(log_record) log_record_processor_mock.on_emit.assert_called_once() log_data = log_record_processor_mock.on_emit.call_args.args[0] result_log_record = log_data.log_record self.assertTrue(isinstance(result_log_record, LogRecord)) self.assertEqual(result_log_record.timestamp, 100) self.assertEqual(result_log_record.observed_timestamp, 101) self.assertIsNotNone(result_log_record.context) self.assertEqual( result_log_record.severity_number, SeverityNumber.WARN ) self.assertEqual(result_log_record.severity_text, "warn") self.assertEqual(result_log_record.body, "a body") self.assertEqual(result_log_record.attributes, {"some": "attributes"}) self.assertEqual(result_log_record.event_name, "event_name") self.assertEqual(log_data.resource, logger.resource) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/logs/test_multi_log_processor.py000066400000000000000000000155311511654350100313120ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
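# Sketch of the LogRecordProcessor interface exercised below (an informal
# summary of the abstract methods that AnotherLogRecordProcessor implements):
#
#     class MyProcessor(LogRecordProcessor):
#         def on_emit(self, log_record): ...    # called once per emitted record
#         def shutdown(self): ...               # release processor resources
#         def force_flush(self, timeout_millis=30000): ...  # True on success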
# pylint:disable=protected-access,no-self-use,no-member import logging import threading import time import unittest from abc import ABC, abstractmethod from unittest.mock import Mock from opentelemetry._logs import LogRecord, SeverityNumber from opentelemetry.sdk._logs._internal import ( ConcurrentMultiLogRecordProcessor, LoggerProvider, LoggingHandler, LogRecordProcessor, ReadWriteLogRecord, SynchronousMultiLogRecordProcessor, ) class AnotherLogRecordProcessor(LogRecordProcessor): def __init__(self, exporter, logs_list): self._exporter = exporter self._log_list = logs_list self._closed = False def on_emit(self, log_record: ReadWriteLogRecord): if self._closed: return self._log_list.append( ( log_record.log_record.body, log_record.log_record.severity_text, ) ) def shutdown(self): self._closed = True self._exporter.shutdown() def force_flush(self, timeout_millis=30000): self._log_list.clear() return True class TestLogRecordProcessor(unittest.TestCase): def test_log_record_processor(self): provider = LoggerProvider() handler = LoggingHandler(logger_provider=provider) logs_list_1 = [] processor1 = AnotherLogRecordProcessor(Mock(), logs_list_1) logs_list_2 = [] processor2 = AnotherLogRecordProcessor(Mock(), logs_list_2) logger = logging.getLogger("test.span.processor") logger.addHandler(handler) # Test with no processor added with self.assertLogs(level=logging.CRITICAL): logger.critical("Odisha, we have another major cyclone") self.assertEqual(len(logs_list_1), 0) self.assertEqual(len(logs_list_2), 0) # Add one processor provider.add_log_record_processor(processor1) with self.assertLogs(level=logging.WARNING): logger.warning("Brace yourself") with self.assertLogs(level=logging.ERROR): logger.error("Some error message") expected_list_1 = [ ("Brace yourself", "WARN"), ("Some error message", "ERROR"), ] self.assertEqual(logs_list_1, expected_list_1) # Add another processor provider.add_log_record_processor(processor2) with self.assertLogs(level=logging.CRITICAL): logger.critical("Something disastrous") expected_list_1.append(("Something disastrous", "CRITICAL")) expected_list_2 = [("Something disastrous", "CRITICAL")] self.assertEqual(logs_list_1, expected_list_1) self.assertEqual(logs_list_2, expected_list_2) class MultiLogRecordProcessorTestBase(ABC): @abstractmethod def _get_multi_log_record_processor(self): pass def make_record(self): return ReadWriteLogRecord( LogRecord( timestamp=1622300111608942000, severity_text="WARN", severity_number=SeverityNumber.WARN, body="Warning message", ) ) def test_on_emit(self): multi_log_record_processor = self._get_multi_log_record_processor() mocks = [Mock(spec=LogRecordProcessor) for _ in range(5)] for mock in mocks: multi_log_record_processor.add_log_record_processor(mock) record = self.make_record() multi_log_record_processor.on_emit(record) for mock in mocks: mock.on_emit.assert_called_with(record) multi_log_record_processor.shutdown() def test_on_shutdown(self): multi_log_record_processor = self._get_multi_log_record_processor() mocks = [Mock(spec=LogRecordProcessor) for _ in range(5)] for mock in mocks: multi_log_record_processor.add_log_record_processor(mock) multi_log_record_processor.shutdown() for mock in mocks: mock.shutdown.assert_called_once_with() def test_on_force_flush(self): multi_log_record_processor = self._get_multi_log_record_processor() mocks = [Mock(spec=LogRecordProcessor) for _ in range(5)] for mock in mocks: multi_log_record_processor.add_log_record_processor(mock) ret_value = multi_log_record_processor.force_flush(100) 
self.assertTrue(ret_value) for mock_processor in mocks: self.assertEqual(1, mock_processor.force_flush.call_count) class TestSynchronousMultiLogRecordProcessor( MultiLogRecordProcessorTestBase, unittest.TestCase ): def _get_multi_log_record_processor(self): return SynchronousMultiLogRecordProcessor() def test_force_flush_delayed(self): multi_log_record_processor = SynchronousMultiLogRecordProcessor() def delay(_): time.sleep(0.09) mock_processor1 = Mock(spec=LogRecordProcessor) mock_processor1.force_flush = Mock(side_effect=delay) multi_log_record_processor.add_log_record_processor(mock_processor1) mock_processor2 = Mock(spec=LogRecordProcessor) multi_log_record_processor.add_log_record_processor(mock_processor2) ret_value = multi_log_record_processor.force_flush(50) self.assertFalse(ret_value) self.assertEqual(mock_processor1.force_flush.call_count, 1) self.assertEqual(mock_processor2.force_flush.call_count, 0) class TestConcurrentMultiLogRecordProcessor( MultiLogRecordProcessorTestBase, unittest.TestCase ): def _get_multi_log_record_processor(self): return ConcurrentMultiLogRecordProcessor() def test_force_flush_delayed(self): multi_log_record_processor = ConcurrentMultiLogRecordProcessor() wait_event = threading.Event() def delay(_): wait_event.wait() mock1 = Mock(spec=LogRecordProcessor) mock1.force_flush = Mock(side_effect=delay) mocks = [Mock(LogRecordProcessor) for _ in range(5)] mocks = [mock1] + mocks for mock_processor in mocks: multi_log_record_processor.add_log_record_processor(mock_processor) ret_value = multi_log_record_processor.force_flush(50) wait_event.set() self.assertFalse(ret_value) for mock in mocks: self.assertEqual(1, mock.force_flush.call_count) multi_log_record_processor.shutdown() python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/000077500000000000000000000000001511654350100243045ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/exponential_histogram/000077500000000000000000000000001511654350100307075ustar00rootroot00000000000000test_exponent_mapping.py000066400000000000000000000371051511654350100356220ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/exponential_histogram# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
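# Mapping intuition for the assertions below (an informal summary; it
# covers scale <= 0 only): each bucket index spans 2 ** -scale binary orders
# of magnitude with an upper-inclusive boundary, so approximately
#
#     map_to_index(value) == ceil(log2(value) * 2 ** scale) - 1
#
# e.g. at scale 0, map_to_index(2.0) == 0 and map_to_index(4.0) == 1, while
# at scale -4 a bucket covers 16 powers of two, so map_to_index(2.0 ** 16) == 0.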
# pylint: disable=protected-access from math import inf, nextafter from sys import float_info from unittest.mock import patch from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import ( MappingUnderflowError, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.exponent_mapping import ( ExponentMapping, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import ( MAX_NORMAL_EXPONENT, MAX_NORMAL_VALUE, MIN_NORMAL_EXPONENT, MIN_NORMAL_VALUE, ) from opentelemetry.test import TestCase def right_boundary(scale: int, index: int) -> float: result = 2**index for _ in range(scale, 0): result = result * result return result class TestExponentMapping(TestCase): def test_singleton(self): self.assertIs(ExponentMapping(-3), ExponentMapping(-3)) self.assertIsNot(ExponentMapping(-3), ExponentMapping(-5)) @patch( "opentelemetry.sdk.metrics._internal.exponential_histogram.mapping." "exponent_mapping.ExponentMapping._mappings", new={}, ) @patch( "opentelemetry.sdk.metrics._internal.exponential_histogram.mapping." "exponent_mapping.ExponentMapping._init" ) def test_init_called_once(self, mock_init): # pylint: disable=no-self-use ExponentMapping(-3) ExponentMapping(-3) mock_init.assert_called_once() def test_exponent_mapping_0(self): with self.assertNotRaises(Exception): ExponentMapping(0) def test_exponent_mapping_zero(self): exponent_mapping = ExponentMapping(0) # This is the equivalent of 1.1 in hexadecimal hex_1_1 = 1 + (1 / 16) # Testing with values near +inf self.assertEqual( exponent_mapping.map_to_index(MAX_NORMAL_VALUE), MAX_NORMAL_EXPONENT, ) self.assertEqual(exponent_mapping.map_to_index(MAX_NORMAL_VALUE), 1023) self.assertEqual(exponent_mapping.map_to_index(2**1023), 1022) self.assertEqual(exponent_mapping.map_to_index(2**1022), 1021) self.assertEqual( exponent_mapping.map_to_index(hex_1_1 * (2**1023)), 1023 ) self.assertEqual( exponent_mapping.map_to_index(hex_1_1 * (2**1022)), 1022 ) # Testing with values near 1 self.assertEqual(exponent_mapping.map_to_index(4), 1) self.assertEqual(exponent_mapping.map_to_index(3), 1) self.assertEqual(exponent_mapping.map_to_index(2), 0) self.assertEqual(exponent_mapping.map_to_index(1), -1) self.assertEqual(exponent_mapping.map_to_index(0.75), -1) self.assertEqual(exponent_mapping.map_to_index(0.51), -1) self.assertEqual(exponent_mapping.map_to_index(0.5), -2) self.assertEqual(exponent_mapping.map_to_index(0.26), -2) self.assertEqual(exponent_mapping.map_to_index(0.25), -3) self.assertEqual(exponent_mapping.map_to_index(0.126), -3) self.assertEqual(exponent_mapping.map_to_index(0.125), -4) # Testing with values near 0 self.assertEqual(exponent_mapping.map_to_index(2**-1022), -1023) self.assertEqual( exponent_mapping.map_to_index(hex_1_1 * (2**-1022)), -1022 ) self.assertEqual(exponent_mapping.map_to_index(2**-1021), -1022) self.assertEqual( exponent_mapping.map_to_index(hex_1_1 * (2**-1021)), -1021 ) self.assertEqual( exponent_mapping.map_to_index(2**-1022), MIN_NORMAL_EXPONENT - 1 ) self.assertEqual( exponent_mapping.map_to_index(2**-1021), MIN_NORMAL_EXPONENT ) # The smallest subnormal value is 2 ** -1074 = 5e-324. 
# This value is also the result of: # s = 1 # while s / 2: # s = s / 2 # s == 5e-324 self.assertEqual( exponent_mapping.map_to_index(2**-1074), MIN_NORMAL_EXPONENT - 1 ) def test_exponent_mapping_min_scale(self): exponent_mapping = ExponentMapping(ExponentMapping._min_scale) self.assertEqual(exponent_mapping.map_to_index(1.000001), 0) self.assertEqual(exponent_mapping.map_to_index(1), -1) self.assertEqual(exponent_mapping.map_to_index(float_info.max), 0) self.assertEqual(exponent_mapping.map_to_index(float_info.min), -1) def test_invalid_scale(self): with self.assertRaises(Exception): ExponentMapping(1) with self.assertRaises(Exception): ExponentMapping(ExponentMapping._min_scale - 1) def test_exponent_mapping_neg_one(self): exponent_mapping = ExponentMapping(-1) self.assertEqual(exponent_mapping.map_to_index(17), 2) self.assertEqual(exponent_mapping.map_to_index(16), 1) self.assertEqual(exponent_mapping.map_to_index(15), 1) self.assertEqual(exponent_mapping.map_to_index(9), 1) self.assertEqual(exponent_mapping.map_to_index(8), 1) self.assertEqual(exponent_mapping.map_to_index(5), 1) self.assertEqual(exponent_mapping.map_to_index(4), 0) self.assertEqual(exponent_mapping.map_to_index(3), 0) self.assertEqual(exponent_mapping.map_to_index(2), 0) self.assertEqual(exponent_mapping.map_to_index(1.5), 0) self.assertEqual(exponent_mapping.map_to_index(1), -1) self.assertEqual(exponent_mapping.map_to_index(0.75), -1) self.assertEqual(exponent_mapping.map_to_index(0.5), -1) self.assertEqual(exponent_mapping.map_to_index(0.25), -2) self.assertEqual(exponent_mapping.map_to_index(0.20), -2) self.assertEqual(exponent_mapping.map_to_index(0.13), -2) self.assertEqual(exponent_mapping.map_to_index(0.125), -2) self.assertEqual(exponent_mapping.map_to_index(0.10), -2) self.assertEqual(exponent_mapping.map_to_index(0.0625), -3) self.assertEqual(exponent_mapping.map_to_index(0.06), -3) def test_exponent_mapping_neg_four(self): # pylint: disable=too-many-statements exponent_mapping = ExponentMapping(-4) self.assertEqual(exponent_mapping.map_to_index(float(0x1)), -1) self.assertEqual(exponent_mapping.map_to_index(float(0x10)), 0) self.assertEqual(exponent_mapping.map_to_index(float(0x100)), 0) self.assertEqual(exponent_mapping.map_to_index(float(0x1000)), 0) self.assertEqual( exponent_mapping.map_to_index(float(0x10000)), 0 ) # base == 2 ** 16 self.assertEqual(exponent_mapping.map_to_index(float(0x100000)), 1) self.assertEqual(exponent_mapping.map_to_index(float(0x1000000)), 1) self.assertEqual(exponent_mapping.map_to_index(float(0x10000000)), 1) self.assertEqual( exponent_mapping.map_to_index(float(0x100000000)), 1 ) # base == 2 ** 32 self.assertEqual(exponent_mapping.map_to_index(float(0x1000000000)), 2) self.assertEqual( exponent_mapping.map_to_index(float(0x10000000000)), 2 ) self.assertEqual( exponent_mapping.map_to_index(float(0x100000000000)), 2 ) self.assertEqual( exponent_mapping.map_to_index(float(0x1000000000000)), 2 ) # base == 2 ** 48 self.assertEqual( exponent_mapping.map_to_index(float(0x10000000000000)), 3 ) self.assertEqual( exponent_mapping.map_to_index(float(0x100000000000000)), 3 ) self.assertEqual( exponent_mapping.map_to_index(float(0x1000000000000000)), 3 ) self.assertEqual( exponent_mapping.map_to_index(float(0x10000000000000000)), 3 ) # base == 2 ** 64 self.assertEqual( exponent_mapping.map_to_index(float(0x100000000000000000)), 4 ) self.assertEqual( exponent_mapping.map_to_index(float(0x1000000000000000000)), 4 ) self.assertEqual( 
exponent_mapping.map_to_index(float(0x10000000000000000000)), 4 ) self.assertEqual( exponent_mapping.map_to_index(float(0x100000000000000000000)), 4 ) # base == 2 ** 80 self.assertEqual( exponent_mapping.map_to_index(float(0x1000000000000000000000)), 5 ) self.assertEqual(exponent_mapping.map_to_index(1 / float(0x1)), -1) self.assertEqual(exponent_mapping.map_to_index(1 / float(0x10)), -1) self.assertEqual(exponent_mapping.map_to_index(1 / float(0x100)), -1) self.assertEqual(exponent_mapping.map_to_index(1 / float(0x1000)), -1) self.assertEqual( exponent_mapping.map_to_index(1 / float(0x10000)), -2 ) # base == 2 ** -16 self.assertEqual( exponent_mapping.map_to_index(1 / float(0x100000)), -2 ) self.assertEqual( exponent_mapping.map_to_index(1 / float(0x1000000)), -2 ) self.assertEqual( exponent_mapping.map_to_index(1 / float(0x10000000)), -2 ) self.assertEqual( exponent_mapping.map_to_index(1 / float(0x100000000)), -3 ) # base == 2 ** -32 self.assertEqual( exponent_mapping.map_to_index(1 / float(0x1000000000)), -3 ) self.assertEqual( exponent_mapping.map_to_index(1 / float(0x10000000000)), -3 ) self.assertEqual( exponent_mapping.map_to_index(1 / float(0x100000000000)), -3 ) self.assertEqual( exponent_mapping.map_to_index(1 / float(0x1000000000000)), -4 ) # base == 2 ** -48 self.assertEqual( exponent_mapping.map_to_index(1 / float(0x10000000000000)), -4 ) self.assertEqual( exponent_mapping.map_to_index(1 / float(0x100000000000000)), -4 ) self.assertEqual( exponent_mapping.map_to_index(1 / float(0x1000000000000000)), -4 ) self.assertEqual( exponent_mapping.map_to_index(1 / float(0x10000000000000000)), -5 ) # base == 2 ** -64 self.assertEqual( exponent_mapping.map_to_index(1 / float(0x100000000000000000)), -5 ) self.assertEqual(exponent_mapping.map_to_index(float_info.max), 63) self.assertEqual(exponent_mapping.map_to_index(2**1023), 63) self.assertEqual(exponent_mapping.map_to_index(2**1019), 63) self.assertEqual(exponent_mapping.map_to_index(2**1009), 63) self.assertEqual(exponent_mapping.map_to_index(2**1008), 62) self.assertEqual(exponent_mapping.map_to_index(2**1007), 62) self.assertEqual(exponent_mapping.map_to_index(2**1000), 62) self.assertEqual(exponent_mapping.map_to_index(2**993), 62) self.assertEqual(exponent_mapping.map_to_index(2**992), 61) self.assertEqual(exponent_mapping.map_to_index(2**991), 61) self.assertEqual(exponent_mapping.map_to_index(2**-1074), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1073), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1072), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1057), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1056), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1041), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1040), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1025), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1024), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1023), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1022), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1009), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1008), -64) self.assertEqual(exponent_mapping.map_to_index(2**-1007), -63) self.assertEqual(exponent_mapping.map_to_index(2**-993), -63) self.assertEqual(exponent_mapping.map_to_index(2**-992), -63) self.assertEqual(exponent_mapping.map_to_index(2**-991), -62) self.assertEqual(exponent_mapping.map_to_index(2**-977), -62) self.assertEqual(exponent_mapping.map_to_index(2**-976), -62) 
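# At scale -4 each bucket spans 16 binary orders of magnitude: 2 ** -976 is
# the upper (inclusive) edge of bucket -62, so the next power of two up,
# 2 ** -975, falls into bucket -61.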
self.assertEqual(exponent_mapping.map_to_index(2**-975), -61) def test_exponent_index_max(self): for scale in range( ExponentMapping._min_scale, ExponentMapping._max_scale ): exponent_mapping = ExponentMapping(scale) index = exponent_mapping.map_to_index(MAX_NORMAL_VALUE) max_index = ((MAX_NORMAL_EXPONENT + 1) >> -scale) - 1 self.assertEqual(index, max_index) boundary = exponent_mapping.get_lower_boundary(index) self.assertEqual(boundary, right_boundary(scale, max_index)) with self.assertRaises(Exception): exponent_mapping.get_lower_boundary(index + 1) def test_exponent_index_min(self): for scale in range( ExponentMapping._min_scale, ExponentMapping._max_scale + 1 ): exponent_mapping = ExponentMapping(scale) min_index = exponent_mapping.map_to_index(MIN_NORMAL_VALUE) boundary = exponent_mapping.get_lower_boundary(min_index) correct_min_index = MIN_NORMAL_EXPONENT >> -scale if MIN_NORMAL_EXPONENT % (1 << -scale) == 0: correct_min_index -= 1 # We do not check that correct_min_index is greater than some smallest # representable integer because Python integers are arbitrary-precision # (there is no smallest integer). self.assertEqual(correct_min_index, min_index) correct_boundary = right_boundary(scale, correct_min_index) self.assertEqual(correct_boundary, boundary) self.assertGreater( right_boundary(scale, correct_min_index + 1), boundary ) self.assertEqual( correct_min_index, exponent_mapping.map_to_index(MIN_NORMAL_VALUE / 2), ) self.assertEqual( correct_min_index, exponent_mapping.map_to_index(MIN_NORMAL_VALUE / 3), ) self.assertEqual( correct_min_index, exponent_mapping.map_to_index(MIN_NORMAL_VALUE / 100), ) self.assertEqual( correct_min_index, exponent_mapping.map_to_index(2**-1050) ) self.assertEqual( correct_min_index, exponent_mapping.map_to_index(2**-1073) ) self.assertEqual( correct_min_index, exponent_mapping.map_to_index(1.1 * (2**-1073)), ) self.assertEqual( correct_min_index, exponent_mapping.map_to_index(2**-1074) ) with self.assertRaises(MappingUnderflowError): exponent_mapping.get_lower_boundary(min_index - 1) self.assertEqual( exponent_mapping.map_to_index( nextafter( # pylint: disable=possibly-used-before-assignment MIN_NORMAL_VALUE, inf ) ), MIN_NORMAL_EXPONENT >> -scale, ) test_exponential_bucket_histogram_aggregation.py000066400000000000000000001307101511654350100425520ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/exponential_histogram# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
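# Behavior sketch for the tests below (an informal summary, not normative):
# the aggregation keeps at most max_size buckets per sign; when a measurement
# falls outside the range representable at the current scale, the scale is
# decreased and adjacent bucket pairs are merged (halving resolution) until
# the value fits. For example, with max_size=4:
#
#     aggregate 2, 4, 1     -> spans 3 binary orders, fits at scale 0
#     then aggregate 8, 0.5 -> range widens, rescale down to scale -1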
# pylint: disable=protected-access,too-many-lines,invalid-name # pylint: disable=consider-using-enumerate,no-self-use,too-many-public-methods from inspect import currentframe from itertools import permutations from logging import WARNING from math import ldexp from random import Random, randrange from sys import float_info, maxsize from time import time_ns from types import MethodType from unittest.mock import Mock, patch from opentelemetry.context import Context from opentelemetry.sdk.metrics._internal.aggregation import ( AggregationTemporality, _ExponentialBucketHistogramAggregation, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.buckets import ( Buckets, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.exponent_mapping import ( ExponentMapping, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import ( MAX_NORMAL_EXPONENT, MIN_NORMAL_EXPONENT, ) from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.logarithm_mapping import ( LogarithmMapping, ) from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.point import ( ExponentialHistogramDataPoint, ) from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory from opentelemetry.sdk.metrics.view import ( ExponentialBucketHistogramAggregation, ) from opentelemetry.test import TestCase def get_counts(buckets: Buckets) -> list: counts = [] for index in range(len(buckets)): counts.append(buckets[index]) return counts def center_val(mapping: ExponentMapping, index: int) -> float: return ( mapping.get_lower_boundary(index) + mapping.get_lower_boundary(index + 1) ) / 2 def swap( first: _ExponentialBucketHistogramAggregation, second: _ExponentialBucketHistogramAggregation, ): for attribute in [ "_value_positive", "_value_negative", "_sum", "_count", "_zero_count", "_min", "_max", "_mapping", ]: temp = getattr(first, attribute) setattr(first, attribute, getattr(second, attribute)) setattr(second, attribute, temp) class TestExponentialBucketHistogramAggregation(TestCase): @patch("opentelemetry.sdk.metrics._internal.aggregation.LogarithmMapping") def test_create_aggregation(self, mock_logarithm_mapping): exponential_bucket_histogram_aggregation = ( ExponentialBucketHistogramAggregation() )._create_aggregation(Mock(), Mock(), Mock(), Mock()) self.assertEqual( exponential_bucket_histogram_aggregation._max_scale, 20 ) mock_logarithm_mapping.assert_called_with(20) exponential_bucket_histogram_aggregation = ( ExponentialBucketHistogramAggregation(max_scale=10) )._create_aggregation(Mock(), Mock(), Mock(), Mock()) self.assertEqual( exponential_bucket_histogram_aggregation._max_scale, 10 ) mock_logarithm_mapping.assert_called_with(10) with self.assertLogs(level=WARNING): exponential_bucket_histogram_aggregation = ( ExponentialBucketHistogramAggregation(max_scale=100) )._create_aggregation(Mock(), Mock(), Mock(), Mock()) self.assertEqual( exponential_bucket_histogram_aggregation._max_scale, 100 ) mock_logarithm_mapping.assert_called_with(100) def assertInEpsilon(self, first, second, epsilon): self.assertLessEqual(first, (second * (1 + epsilon))) self.assertGreaterEqual(first, (second * (1 - epsilon))) def require_equal(self, a, b): if a._sum == 0 or b._sum == 0: self.assertAlmostEqual(a._sum, b._sum, delta=1e-6) else: self.assertInEpsilon(a._sum, b._sum, 1e-6) self.assertEqual(a._count, b._count) self.assertEqual(a._zero_count, b._zero_count) self.assertEqual(a._mapping.scale, b._mapping.scale) 
self.assertEqual(len(a._value_positive), len(b._value_positive)) self.assertEqual(len(a._value_negative), len(b._value_negative)) for index in range(len(a._value_positive)): self.assertEqual( a._value_positive[index], b._value_positive[index] ) for index in range(len(a._value_negative)): self.assertEqual( a._value_negative[index], b._value_negative[index] ) def test_alternating_growth_0(self): """ Tests insertion of [2, 4, 1]. The index of 2 (i.e., 0) becomes `indexBase`, the 4 goes to its right and the 1 goes in the last position of the backing array. With 3 binary orders of magnitude and MaxSize=4, this must finish with scale=0; with minimum value 1 this must finish with offset=-1 (all scales). """ # The corresponding Go test is TestAlternatingGrowth1 where: # agg := NewFloat64(NewConfig(WithMaxSize(4))) # agg is an instance of github.com/lightstep/otel-launcher-go/lightstep/sdk/metric/aggregator/histogram/structure.Histogram[float64] exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=4, ) ) now = time_ns() ctx = Context() exponential_histogram_aggregation.aggregate( Measurement(2, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(4, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(1, now, Mock(), ctx) ) self.assertEqual( exponential_histogram_aggregation._value_positive.offset, -1 ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( get_counts(exponential_histogram_aggregation._value_positive), [1, 1, 1], ) def test_alternating_growth_1(self): """ Tests insertion of [2, 2, 4, 1, 8, 0.5]. The test proceeds as above but then downscales once further to scale=-1, thus index -1 holds range [0.25, 1.0), index 0 holds range [1.0, 4), index 1 holds range [4, 16). """ exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=4, ) ) now = time_ns() ctx = Context() exponential_histogram_aggregation.aggregate( Measurement(2, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(2, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(2, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(1, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(8, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(0.5, now, Mock(), ctx) ) self.assertEqual( exponential_histogram_aggregation._value_positive.offset, -1 ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, -1) self.assertEqual( get_counts(exponential_histogram_aggregation._value_positive), [2, 3, 1], ) def test_permutations(self): """ Tests that every permutation of certain sequences with maxSize=2 results in the same scale=-1 histogram. 
""" now = time_ns() ctx = Context() for test_values, expected in [ [ [0.5, 1.0, 2.0], { "scale": -1, "offset": -1, "len": 2, "at_0": 2, "at_1": 1, }, ], [ [1.0, 2.0, 4.0], { "scale": -1, "offset": -1, "len": 2, "at_0": 1, "at_1": 2, }, ], [ [0.25, 0.5, 1], { "scale": -1, "offset": -2, "len": 2, "at_0": 1, "at_1": 2, }, ], ]: for permutation in permutations(test_values): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=2, ) ) for value in permutation: exponential_histogram_aggregation.aggregate( Measurement(value, now, Mock(), ctx) ) self.assertEqual( exponential_histogram_aggregation._mapping.scale, expected["scale"], ) self.assertEqual( exponential_histogram_aggregation._value_positive.offset, expected["offset"], ) self.assertEqual( len(exponential_histogram_aggregation._value_positive), expected["len"], ) self.assertEqual( exponential_histogram_aggregation._value_positive[0], expected["at_0"], ) self.assertEqual( exponential_histogram_aggregation._value_positive[1], expected["at_1"], ) def test_ascending_sequence(self): for max_size in [3, 4, 6, 9]: for offset in range(-5, 6): for init_scale in [0, 4]: self.ascending_sequence_test(max_size, offset, init_scale) # pylint: disable=too-many-locals def ascending_sequence_test( self, max_size: int, offset: int, init_scale: int ): now = time_ns() ctx = Context() for step in range(max_size, max_size * 4): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=max_size, ) ) if init_scale <= 0: mapping = ExponentMapping(init_scale) else: mapping = LogarithmMapping(init_scale) min_val = center_val(mapping, offset) max_val = center_val(mapping, offset + step) sum_ = 0.0 for index in range(max_size): value = center_val(mapping, offset + index) exponential_histogram_aggregation.aggregate( Measurement(value, now, Mock(), ctx) ) sum_ += value self.assertEqual( init_scale, exponential_histogram_aggregation._mapping._scale ) self.assertEqual( offset, exponential_histogram_aggregation._value_positive.offset, ) exponential_histogram_aggregation.aggregate( Measurement(max_val, now, Mock(), ctx) ) sum_ += max_val self.assertNotEqual( 0, exponential_histogram_aggregation._value_positive[0] ) # The maximum-index filled bucket is at or # above the mid-point, (otherwise we # downscaled too much). max_fill = 0 total_count = 0 for index in range( len(exponential_histogram_aggregation._value_positive) ): total_count += ( exponential_histogram_aggregation._value_positive[index] ) if ( exponential_histogram_aggregation._value_positive[index] != 0 ): max_fill = index # FIXME the corresponding Go code is # require.GreaterOrEqual(t, maxFill, uint32(maxSize)/2), make sure # this is actually equivalent. 
self.assertGreaterEqual(max_fill, int(max_size / 2)) self.assertGreaterEqual(max_size + 1, total_count) self.assertGreaterEqual( max_size + 1, exponential_histogram_aggregation._count ) self.assertGreaterEqual( sum_, exponential_histogram_aggregation._sum ) if init_scale <= 0: mapping = ExponentMapping( exponential_histogram_aggregation._mapping.scale ) else: mapping = LogarithmMapping( exponential_histogram_aggregation._mapping.scale ) index = mapping.map_to_index(min_val) self.assertEqual( index, exponential_histogram_aggregation._value_positive.offset ) index = mapping.map_to_index(max_val) self.assertEqual( index, exponential_histogram_aggregation._value_positive.offset + len(exponential_histogram_aggregation._value_positive) - 1, ) def test_reset(self): now = time_ns() ctx = Context() for increment in [0x1, 0x100, 0x10000, 0x100000000, 0x200000000]: def mock_increment(self, bucket_index: int) -> None: """ Increments a bucket """ # pylint: disable=cell-var-from-loop self._counts[bucket_index] += increment exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=256, ) ) self.assertEqual( exponential_histogram_aggregation._count, exponential_histogram_aggregation._zero_count, ) self.assertEqual(0, exponential_histogram_aggregation._sum) expect = 0 exponential_histogram_aggregation._value_positive = Buckets() for value in range(2, 257): expect += value * increment with patch.object( exponential_histogram_aggregation._value_positive, "increment_bucket", MethodType( mock_increment, exponential_histogram_aggregation._value_positive, ), ): exponential_histogram_aggregation.aggregate( Measurement(value, now, Mock(), ctx) ) exponential_histogram_aggregation._count *= increment exponential_histogram_aggregation._sum *= increment self.assertEqual(expect, exponential_histogram_aggregation._sum) self.assertEqual( 255 * increment, exponential_histogram_aggregation._count ) # See test_integer_aggregation about why scale is 5, len is # 256 - (1 << scale)- 1 and offset is (1 << scale) - 1. 
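            # Worked out for the expected scale of 5 (note the grouping:
            # len is 256 - ((1 << scale) - 1)):
            #     (1 << 5) - 1 = 31
            #     len = 256 - 31 = 225
            #     offset = 31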
scale = exponential_histogram_aggregation._mapping.scale self.assertEqual(5, scale) self.assertEqual( 256 - ((1 << scale) - 1), len(exponential_histogram_aggregation._value_positive), ) self.assertEqual( (1 << scale) - 1, exponential_histogram_aggregation._value_positive.offset, ) for index in range(0, 256): self.assertLessEqual( exponential_histogram_aggregation._value_positive[index], 6 * increment, ) def test_move_into(self): now = time_ns() ctx = Context() exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=256, ) ) exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=256, ) ) expect = 0 for index in range(2, 257): expect += index exponential_histogram_aggregation_0.aggregate( Measurement(index, now, Mock(), ctx) ) exponential_histogram_aggregation_0.aggregate( Measurement(0, now, Mock(), ctx) ) swap( exponential_histogram_aggregation_0, exponential_histogram_aggregation_1, ) self.assertEqual(0, exponential_histogram_aggregation_0._sum) self.assertEqual(0, exponential_histogram_aggregation_0._count) self.assertEqual(0, exponential_histogram_aggregation_0._zero_count) self.assertEqual(expect, exponential_histogram_aggregation_1._sum) self.assertEqual(255 * 2, exponential_histogram_aggregation_1._count) self.assertEqual(255, exponential_histogram_aggregation_1._zero_count) scale = exponential_histogram_aggregation_1._mapping.scale self.assertEqual(5, scale) self.assertEqual( 256 - ((1 << scale) - 1), len(exponential_histogram_aggregation_1._value_positive), ) self.assertEqual( (1 << scale) - 1, exponential_histogram_aggregation_1._value_positive.offset, ) for index in range(0, 256): self.assertLessEqual( exponential_histogram_aggregation_1._value_positive[index], 6 ) def test_very_large_numbers(self): now = time_ns() ctx = Context() exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=2, ) ) def expect_balanced(count: int): self.assertEqual( 2, len(exponential_histogram_aggregation._value_positive) ) self.assertEqual( -1, exponential_histogram_aggregation._value_positive.offset ) self.assertEqual( count, exponential_histogram_aggregation._value_positive[0] ) self.assertEqual( count, exponential_histogram_aggregation._value_positive[1] ) exponential_histogram_aggregation.aggregate( Measurement(2**-100, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(2**100, now, Mock(), ctx) ) self.assertLessEqual( 2**100, (exponential_histogram_aggregation._sum * (1 + 1e-5)) ) self.assertGreaterEqual( 2**100, (exponential_histogram_aggregation._sum * (1 - 1e-5)) ) self.assertEqual(2, exponential_histogram_aggregation._count) self.assertEqual(-7, exponential_histogram_aggregation._mapping.scale) expect_balanced(1) exponential_histogram_aggregation.aggregate( Measurement(2**-127, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(2**128, now, Mock(), ctx) ) self.assertLessEqual( 2**128, (exponential_histogram_aggregation._sum * (1 + 1e-5)) ) self.assertGreaterEqual( 2**128, (exponential_histogram_aggregation._sum * (1 - 1e-5)) ) self.assertEqual(4, exponential_histogram_aggregation._count) 
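        # Why scale -7: at scale s each bucket spans a factor of
        # 2 ** (2 ** -s), so two buckets can cover a spread of at most
        # 2 * 2 ** -s binary orders of magnitude.  Holding both 2**-127 and
        # 2**128 (255 binary orders apart) requires 2 * 2 ** -s >= 255, and
        # s = -7 (each bucket spanning a factor of 2**128) is the largest
        # scale that satisfies it.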
self.assertEqual(-7, exponential_histogram_aggregation._mapping.scale) expect_balanced(2) exponential_histogram_aggregation.aggregate( Measurement(2**-129, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(2**255, now, Mock(), ctx) ) self.assertLessEqual( 2**255, (exponential_histogram_aggregation._sum * (1 + 1e-5)) ) self.assertGreaterEqual( 2**255, (exponential_histogram_aggregation._sum * (1 - 1e-5)) ) self.assertEqual(6, exponential_histogram_aggregation._count) self.assertEqual(-8, exponential_histogram_aggregation._mapping.scale) expect_balanced(3) def test_full_range(self): now = time_ns() ctx = Context() exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=2, ) ) exponential_histogram_aggregation.aggregate( Measurement(float_info.max, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(1, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(2**-1074, now, Mock(), ctx) ) self.assertEqual( float_info.max, exponential_histogram_aggregation._sum ) self.assertEqual(3, exponential_histogram_aggregation._count) self.assertEqual( ExponentMapping._min_scale, exponential_histogram_aggregation._mapping.scale, ) self.assertEqual( _ExponentialBucketHistogramAggregation._min_max_size, len(exponential_histogram_aggregation._value_positive), ) self.assertEqual( -1, exponential_histogram_aggregation._value_positive.offset ) self.assertLessEqual( exponential_histogram_aggregation._value_positive[0], 2 ) self.assertLessEqual( exponential_histogram_aggregation._value_positive[1], 1 ) def test_aggregator_min_max(self): now = time_ns() ctx = Context() exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), ) ) for value in [1, 3, 5, 7, 9]: exponential_histogram_aggregation.aggregate( Measurement(value, now, Mock(), ctx) ) self.assertEqual(1, exponential_histogram_aggregation._min) self.assertEqual(9, exponential_histogram_aggregation._max) exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), ) ) for value in [-1, -3, -5, -7, -9]: exponential_histogram_aggregation.aggregate( Measurement(value, now, Mock(), ctx) ) self.assertEqual(-9, exponential_histogram_aggregation._min) self.assertEqual(-1, exponential_histogram_aggregation._max) def test_aggregator_copy_swap(self): now = time_ns() ctx = Context() exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), ) ) for value in [1, 3, 5, 7, 9, -1, -3, -5]: exponential_histogram_aggregation_0.aggregate( Measurement(value, now, Mock(), ctx) ) exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), ) ) for value in [5, 4, 3, 2]: exponential_histogram_aggregation_1.aggregate( Measurement(value, now, Mock(), ctx) ) exponential_histogram_aggregation_2 = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), 
AggregationTemporality.DELTA, Mock(), ) ) swap( exponential_histogram_aggregation_0, exponential_histogram_aggregation_1, ) # pylint: disable=unnecessary-dunder-call exponential_histogram_aggregation_2._value_positive.__init__() exponential_histogram_aggregation_2._value_negative.__init__() exponential_histogram_aggregation_2._sum = 0 exponential_histogram_aggregation_2._count = 0 exponential_histogram_aggregation_2._zero_count = 0 exponential_histogram_aggregation_2._min = 0 exponential_histogram_aggregation_2._max = 0 exponential_histogram_aggregation_2._mapping = LogarithmMapping( LogarithmMapping._max_scale ) for attribute in [ "_value_positive", "_value_negative", "_sum", "_count", "_zero_count", "_min", "_max", "_mapping", ]: setattr( exponential_histogram_aggregation_2, attribute, getattr(exponential_histogram_aggregation_1, attribute), ) self.require_equal( exponential_histogram_aggregation_1, exponential_histogram_aggregation_2, ) def test_zero_count_by_increment(self): now = time_ns() ctx = Context() exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), ) ) increment = 10 for _ in range(increment): exponential_histogram_aggregation_0.aggregate( Measurement(0, now, Mock(), ctx) ) exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), ) ) def mock_increment(self, bucket_index: int) -> None: """ Increments a bucket """ self._counts[bucket_index] += increment exponential_histogram_aggregation_1._value_positive = Buckets() with patch.object( exponential_histogram_aggregation_1._value_positive, "increment_bucket", MethodType( mock_increment, exponential_histogram_aggregation_1._value_positive, ), ): exponential_histogram_aggregation_1.aggregate( Measurement(0, now, Mock(), ctx) ) exponential_histogram_aggregation_1._count *= increment exponential_histogram_aggregation_1._zero_count *= increment self.require_equal( exponential_histogram_aggregation_0, exponential_histogram_aggregation_1, ) def test_one_count_by_increment(self): now = time_ns() ctx = Context() exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), ) ) increment = 10 for _ in range(increment): exponential_histogram_aggregation_0.aggregate( Measurement(1, now, Mock(), ctx) ) exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), ) ) def mock_increment(self, bucket_index: int) -> None: """ Increments a bucket """ self._counts[bucket_index] += increment exponential_histogram_aggregation_1._value_positive = Buckets() with patch.object( exponential_histogram_aggregation_1._value_positive, "increment_bucket", MethodType( mock_increment, exponential_histogram_aggregation_1._value_positive, ), ): exponential_histogram_aggregation_1.aggregate( Measurement(1, now, Mock(), ctx) ) exponential_histogram_aggregation_1._count *= increment exponential_histogram_aggregation_1._sum *= increment self.require_equal( exponential_histogram_aggregation_0, exponential_histogram_aggregation_1, ) def test_boundary_statistics(self): total = MAX_NORMAL_EXPONENT - MIN_NORMAL_EXPONENT + 1 for 
scale in range( LogarithmMapping._min_scale, LogarithmMapping._max_scale + 1 ): above = 0 below = 0 if scale <= 0: mapping = ExponentMapping(scale) else: mapping = LogarithmMapping(scale) for exp in range(MIN_NORMAL_EXPONENT, MAX_NORMAL_EXPONENT + 1): value = ldexp(1, exp) index = mapping.map_to_index(value) with self.assertNotRaises(Exception): boundary = mapping.get_lower_boundary(index + 1) if boundary < value: above += 1 elif boundary > value: below += 1 self.assertInEpsilon(0.5, above / total, 0.05) self.assertInEpsilon(0.5, below / total, 0.06) def test_min_max_size(self): """ Tests that the minimum max_size is the right value. """ exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=_ExponentialBucketHistogramAggregation._min_max_size, ) ) # The minimum and maximum normal floating point values are used here to # make sure the mapping can contain the full range of values. exponential_histogram_aggregation.aggregate(Mock(value=float_info.min)) exponential_histogram_aggregation.aggregate(Mock(value=float_info.max)) # This means the smallest max_scale is enough for the full range of the # normal floating point values. self.assertEqual( len(exponential_histogram_aggregation._value_positive._counts), exponential_histogram_aggregation._min_max_size, ) def test_aggregate_collect(self): """ Tests a repeated cycle of aggregation and collection. """ now = time_ns() ctx = Context() exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), ) ) exponential_histogram_aggregation.aggregate( Measurement(2, now, Mock(), ctx) ) exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 0 ) exponential_histogram_aggregation.aggregate( Measurement(2, now, Mock(), ctx) ) exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 0 ) exponential_histogram_aggregation.aggregate( Measurement(2, now, Mock(), ctx) ) exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 0 ) def test_collect_results_cumulative(self) -> None: now = time_ns() ctx = Context() exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), ) ) self.maxDiff = None self.assertEqual(exponential_histogram_aggregation._mapping._scale, 20) exponential_histogram_aggregation.aggregate( Measurement(2, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping._scale, 20) exponential_histogram_aggregation.aggregate( Measurement(4, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping._scale, 7) exponential_histogram_aggregation.aggregate( Measurement(1, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping._scale, 6) collection_0 = exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, Mock() ) self.assertEqual(len(collection_0.positive.bucket_counts), 160) self.assertEqual(collection_0.count, 3) self.assertEqual(collection_0.sum, 7) self.assertEqual(collection_0.scale, 6) self.assertEqual(collection_0.zero_count, 0) self.assertEqual( collection_0.positive.bucket_counts, [1, *[0] * 63, 1, *[0] * 63, 1, *[0] * 31], ) self.assertEqual(collection_0.flags, 0) 
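        # At scale 6 consecutive powers of two sit 2**6 = 64 bucket indices
        # apart: the exact power 2**k maps to index (k << 6) - 1, so the
        # values 1, 2 and 4 land at indices -1, 63 and 127.  With the bucket
        # array starting at index -1, those are positions 0, 64 and 128 of
        # bucket_counts above.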
self.assertEqual(collection_0.min, 1) self.assertEqual(collection_0.max, 4) exponential_histogram_aggregation.aggregate( Measurement(1, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(8, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(0.5, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(0.1, now, Mock(), ctx) ) exponential_histogram_aggregation.aggregate( Measurement(0.045, now, Mock(), ctx) ) collection_1 = exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, Mock() ) previous_count = collection_1.positive.bucket_counts[0] count_counts = [[previous_count, 0]] for count in collection_1.positive.bucket_counts: if count == previous_count: count_counts[-1][1] += 1 else: previous_count = count count_counts.append([previous_count, 1]) self.assertEqual(collection_1.count, 8) self.assertEqual(collection_1.sum, 16.645) self.assertEqual(collection_1.scale, 4) self.assertEqual(collection_1.zero_count, 0) self.assertEqual( collection_1.positive.bucket_counts, [ 1, *[0] * 17, 1, *[0] * 36, 1, *[0] * 15, 2, *[0] * 15, 1, *[0] * 15, 1, *[0] * 15, 1, *[0] * 40, ], ) self.assertEqual(collection_1.flags, 0) self.assertEqual(collection_1.min, 0.045) self.assertEqual(collection_1.max, 8) def test_cumulative_aggregation_with_random_data(self) -> None: now = time_ns() ctx = Context() histogram = _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory(_ExponentialBucketHistogramAggregation), AggregationTemporality.DELTA, Mock(), ) def collect_and_validate(values, histogram) -> None: result: ExponentialHistogramDataPoint = histogram.collect( AggregationTemporality.CUMULATIVE, 0 ) buckets = result.positive.bucket_counts scale = result.scale index_start = result.positive.offset for i in range(len(buckets)): index = index_start + i count = buckets[i] lower_bound = 2 ** (index / (2**scale)) upper_bound = 2 ** ((index + 1) / (2**scale)) matches = 0 for value in values: # pylint: disable=chained-comparison if value > lower_bound and value <= upper_bound: matches += 1 assert matches == count, ( f"index: {index}, count: {count}, scale: {scale}, lower_bound: {lower_bound}, upper_bound: {upper_bound}, matches: {matches}" ) assert sum(buckets) + result.zero_count == len(values) assert result.sum == sum(values) assert result.count == len(values) assert result.min == min(values) assert result.max == max(values) assert result.zero_count == len([v for v in values if v == 0]) assert scale >= 3 seed = randrange(maxsize) # This test case is executed with random values every time. In order to # run this test case with the same values used in a previous execution, # check the value printed by that previous execution of this test case # and use the same value for the seed variable in the line below. 
# seed = 3373389994391084876 random_generator = Random(seed) print(f"seed for {currentframe().f_code.co_name} is {seed}") values = [] for i in range(2000): # avoid both values being 0 value = random_generator.randint(0 if i else 1, 1000) values.append(value) histogram.aggregate(Measurement(value, now, Mock(), ctx)) if i % 20 == 0: collect_and_validate(values, histogram) collect_and_validate(values, histogram) def test_merge_collect_cumulative(self): now = time_ns() ctx = Context() exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=4, ) ) for value in [2, 4, 8, 16]: exponential_histogram_aggregation.aggregate( Measurement(value, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( exponential_histogram_aggregation._value_positive.offset, 0 ) self.assertEqual( exponential_histogram_aggregation._value_positive.counts, [1, 1, 1, 1], ) result_0 = exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 0, ) self.assertEqual(result_0.scale, 0) for value in [1, 2, 4, 8]: exponential_histogram_aggregation.aggregate( Measurement(1 / value, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( exponential_histogram_aggregation._value_positive.offset, -4 ) self.assertEqual( exponential_histogram_aggregation._value_positive.counts, [1, 1, 1, 1], ) result_1 = exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 0, ) self.assertEqual(result_1.scale, -1) def test_merge_collect_delta(self): now = time_ns() ctx = Context() exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), _default_reservoir_factory( _ExponentialBucketHistogramAggregation ), AggregationTemporality.DELTA, Mock(), max_size=4, ) ) for value in [2, 4, 8, 16]: exponential_histogram_aggregation.aggregate( Measurement(value, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( exponential_histogram_aggregation._value_positive.offset, 0 ) self.assertEqual( exponential_histogram_aggregation._value_positive.counts, [1, 1, 1, 1], ) result = exponential_histogram_aggregation.collect( AggregationTemporality.DELTA, 0, ) for value in [1, 2, 4, 8]: exponential_histogram_aggregation.aggregate( Measurement(1 / value, now, Mock(), ctx) ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( exponential_histogram_aggregation._value_positive.offset, -4 ) self.assertEqual( exponential_histogram_aggregation._value_positive.counts, [1, 1, 1, 1], ) result_1 = exponential_histogram_aggregation.collect( AggregationTemporality.DELTA, 0, ) self.assertEqual(result.scale, result_1.scale) test_logarithm_mapping.py000066400000000000000000000205241511654350100357450ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/exponential_histogram# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=protected-access

from math import sqrt
from unittest import TestCase
from unittest.mock import patch

from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import (
    MappingOverflowError,
    MappingUnderflowError,
)
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import (
    MAX_NORMAL_EXPONENT,
    MAX_NORMAL_VALUE,
    MIN_NORMAL_EXPONENT,
    MIN_NORMAL_VALUE,
)
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.logarithm_mapping import (
    LogarithmMapping,
)


def left_boundary(scale: int, index: int) -> float:
    # This is implemented in this way to avoid using a third-party bigfloat
    # package. The Go implementation uses a bigfloat package that is part of
    # their standard library. The assumption here is that the smallest
    # normal float available in Python is 2 ** -1022 (from
    # sys.float_info.min).
    while scale > 0:
        if index < -1022:
            index /= 2
            scale -= 1
        else:
            break

    result = 2**index

    for _ in range(scale, 0, -1):
        result = sqrt(result)

    return result


class TestLogarithmMapping(TestCase):
    # pylint: disable=invalid-name
    def assertInEpsilon(self, first, second, epsilon):
        self.assertLessEqual(first, (second * (1 + epsilon)))
        self.assertGreaterEqual(first, (second * (1 - epsilon)))

    @patch(
        "opentelemetry.sdk.metrics._internal.exponential_histogram.mapping."
        "logarithm_mapping.LogarithmMapping._mappings",
        new={},
    )
    @patch(
        "opentelemetry.sdk.metrics._internal.exponential_histogram.mapping."
        "logarithm_mapping.LogarithmMapping._init"
    )
    def test_init_called_once(self, mock_init):
        # pylint: disable=no-self-use

        LogarithmMapping(3)
        LogarithmMapping(3)

        mock_init.assert_called_once()

    def test_invalid_scale(self):
        with self.assertRaises(Exception):
            LogarithmMapping(-1)

    def test_logarithm_mapping_scale_one(self):
        # The exponentiation factor for this logarithm exponent histogram
        # mapping is square_root(2).
        # Scale 1 means 1 division between every power of two, having
        # a factor square_root(2) times the lower boundary.
        logarithm_exponent_histogram_mapping = LogarithmMapping(1)

        self.assertEqual(logarithm_exponent_histogram_mapping.scale, 1)

        # Note: Do not test exact boundaries, with the exception of
        # 1, because we expect errors in that case (e.g.,
        # MapToIndex(8) returns 5, an off-by-one; see the following
        # test).
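        # Worked example at scale 1: bucket boundaries are successive powers
        # of sqrt(2), and map_to_index(value) effectively computes
        # ceil(2 * log2(value)) - 1.  For value=15: 2 * log2(15) ~= 7.81,
        # so ceil(7.81) - 1 = 7, matching the first assertion below.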
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(15), 7
        )
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(9), 6
        )
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(7), 5
        )
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(5), 4
        )
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(3), 3
        )
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(2.5), 2
        )
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(1.5), 1
        )
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(1.2), 0
        )
        # This one is actually an exact test
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(1), -1
        )
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(0.75), -1
        )
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(0.55), -2
        )
        self.assertEqual(
            logarithm_exponent_histogram_mapping.map_to_index(0.45), -3
        )

    def test_logarithm_boundary(self):
        for scale in [1, 2, 3, 4, 10, 15]:
            logarithm_exponent_histogram_mapping = LogarithmMapping(scale)

            for index in [-100, -10, -1, 0, 1, 10, 100]:
                lower_boundary = (
                    logarithm_exponent_histogram_mapping.get_lower_boundary(
                        index
                    )
                )

                mapped_index = (
                    logarithm_exponent_histogram_mapping.map_to_index(
                        lower_boundary
                    )
                )

                self.assertLessEqual(index - 1, mapped_index)
                self.assertGreaterEqual(index, mapped_index)

                self.assertInEpsilon(
                    lower_boundary, left_boundary(scale, index), 1e-9
                )

    def test_logarithm_index_max(self):
        for scale in range(
            LogarithmMapping._min_scale, LogarithmMapping._max_scale + 1
        ):
            logarithm_mapping = LogarithmMapping(scale)

            index = logarithm_mapping.map_to_index(MAX_NORMAL_VALUE)

            max_index = ((MAX_NORMAL_EXPONENT + 1) << scale) - 1

            # We do not check that max_index stays below some greatest
            # possible integer value because Python integers are unbounded.
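            # Derivation of max_index: MAX_NORMAL_VALUE lies just below
            # 2 ** (MAX_NORMAL_EXPONENT + 1), so it falls in the bucket
            # whose upper boundary is that power of two, i.e. index
            # ((MAX_NORMAL_EXPONENT + 1) << scale) - 1.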
self.assertEqual(index, max_index) boundary = logarithm_mapping.get_lower_boundary(index) base = logarithm_mapping.get_lower_boundary(1) self.assertLess(boundary, MAX_NORMAL_VALUE) self.assertInEpsilon( (MAX_NORMAL_VALUE - boundary) / boundary, base - 1, 1e-6 ) with self.assertRaises(MappingOverflowError): logarithm_mapping.get_lower_boundary(index + 1) with self.assertRaises(MappingOverflowError): logarithm_mapping.get_lower_boundary(index + 2) def test_logarithm_index_min(self): for scale in range( LogarithmMapping._min_scale, LogarithmMapping._max_scale + 1 ): logarithm_mapping = LogarithmMapping(scale) min_index = logarithm_mapping.map_to_index(MIN_NORMAL_VALUE) correct_min_index = (MIN_NORMAL_EXPONENT << scale) - 1 self.assertEqual(min_index, correct_min_index) correct_mapped = left_boundary(scale, correct_min_index) self.assertLess(correct_mapped, MIN_NORMAL_VALUE) correct_mapped_upper = left_boundary(scale, correct_min_index + 1) self.assertEqual(correct_mapped_upper, MIN_NORMAL_VALUE) mapped = logarithm_mapping.get_lower_boundary(min_index + 1) self.assertInEpsilon(mapped, MIN_NORMAL_VALUE, 1e-6) self.assertEqual( logarithm_mapping.map_to_index(MIN_NORMAL_VALUE / 2), correct_min_index, ) self.assertEqual( logarithm_mapping.map_to_index(MIN_NORMAL_VALUE / 3), correct_min_index, ) self.assertEqual( logarithm_mapping.map_to_index(MIN_NORMAL_VALUE / 100), correct_min_index, ) self.assertEqual( logarithm_mapping.map_to_index(2**-1050), correct_min_index ) self.assertEqual( logarithm_mapping.map_to_index(2**-1073), correct_min_index ) self.assertEqual( logarithm_mapping.map_to_index(1.1 * 2**-1073), correct_min_index, ) self.assertEqual( logarithm_mapping.map_to_index(2**-1074), correct_min_index ) mapped_lower = logarithm_mapping.get_lower_boundary(min_index) self.assertInEpsilon(correct_mapped, mapped_lower, 1e-6) with self.assertRaises(MappingUnderflowError): logarithm_mapping.get_lower_boundary(min_index - 1) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test/000077500000000000000000000000001511654350100276665ustar00rootroot00000000000000test_console_exporter.py000066400000000000000000000110621511654350100346120ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from io import StringIO from json import loads from os import linesep from unittest import TestCase from unittest.mock import Mock, patch from opentelemetry.context import Context from opentelemetry.metrics import get_meter, set_meter_provider from opentelemetry.sdk.metrics import AlwaysOnExemplarFilter, MeterProvider from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) from opentelemetry.test.globals_test import reset_metrics_globals TEST_TIMESTAMP = 1_234_567_890 class TestConsoleExporter(TestCase): def setUp(self): reset_metrics_globals() def tearDown(self): reset_metrics_globals() def test_console_exporter(self): output = StringIO() exporter = ConsoleMetricExporter(out=output) reader = PeriodicExportingMetricReader( exporter, export_interval_millis=100 ) provider = MeterProvider(metric_readers=[reader]) set_meter_provider(provider) meter = get_meter(__name__) counter = meter.create_counter( "name", description="description", unit="unit" ) counter.add(1, attributes={"a": "b"}) provider.shutdown() output.seek(0) result_0 = loads("".join(output.readlines())) self.assertGreater(len(result_0), 0) metrics = result_0["resource_metrics"][0]["scope_metrics"][0] self.assertEqual(metrics["scope"]["name"], "test_console_exporter") metrics = metrics["metrics"][0] self.assertEqual(metrics["name"], "name") self.assertEqual(metrics["description"], "description") self.assertEqual(metrics["unit"], "unit") metrics = metrics["data"] self.assertEqual(metrics["aggregation_temporality"], 2) self.assertTrue(metrics["is_monotonic"]) metrics = metrics["data_points"][0] self.assertEqual(metrics["attributes"], {"a": "b"}) self.assertEqual(metrics["value"], 1) def test_console_exporter_no_export(self): output = StringIO() exporter = ConsoleMetricExporter(out=output) reader = PeriodicExportingMetricReader( exporter, export_interval_millis=100 ) provider = MeterProvider(metric_readers=[reader]) provider.shutdown() output.seek(0) actual = "".join(output.readlines()) expected = "" self.assertEqual(actual, expected) @patch( "opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP), ) def test_console_exporter_with_exemplars(self): ctx = Context() output = StringIO() exporter = ConsoleMetricExporter(out=output) reader = PeriodicExportingMetricReader( exporter, export_interval_millis=100 ) provider = MeterProvider( metric_readers=[reader], exemplar_filter=AlwaysOnExemplarFilter() ) set_meter_provider(provider) meter = get_meter(__name__) counter = meter.create_counter( "name", description="description", unit="unit" ) counter.add(1, attributes={"a": "b"}, context=ctx) provider.shutdown() output.seek(0) joined_output = "".join(output.readlines()) result_0 = loads(joined_output.strip(linesep)) self.assertGreater(len(result_0), 0) metrics = result_0["resource_metrics"][0]["scope_metrics"][0] self.assertEqual(metrics["scope"]["name"], "test_console_exporter") point = metrics["metrics"][0]["data"]["data_points"][0] self.assertEqual(point["attributes"], {"a": "b"}) self.assertEqual(point["value"], 1) self.assertEqual( point["exemplars"], [ { "filtered_attributes": {}, "value": 1, "time_unix_nano": TEST_TIMESTAMP, "span_id": None, "trace_id": None, } ], ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test/test_cpu_time.py000066400000000000000000000305251511654350100331110ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore import io from typing import Generator, Iterable, List from unittest import TestCase from unittest.mock import Mock, patch from opentelemetry.context import Context from opentelemetry.metrics import CallbackOptions, Instrument, Observation from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics._internal.measurement import Measurement # FIXME Test that the instrument methods can be called concurrently safely. TEST_TIMESTAMP = 1_234_567_890 TEST_CONTEXT = Context() @patch( "opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP), ) class TestCpuTimeIntegration(TestCase): """Integration test of scraping CPU time from proc stat with an observable counter""" procstat_str = """\ cpu 8549517 4919096 9165935 1430260740 1641349 0 1646147 623279 0 0 cpu0 615029 317746 594601 89126459 129629 0 834346 42137 0 0 cpu1 588232 349185 640492 89156411 124485 0 241004 41862 0 0 intr 4370168813 38 9 0 0 1639 0 0 0 0 0 2865202 0 152 0 0 0 0 0 0 0 0 0 0 0 0 7236812 5966240 4501046 6467792 7289114 6048205 5299600 5178254 4642580 6826812 6880917 6230308 6307699 4699637 6119330 4905094 5644039 4700633 10539029 5365438 6086908 2227906 5094323 9685701 10137610 7739951 7143508 8123281 4968458 5683103 9890878 4466603 0 0 0 8929628 0 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 6877594077 btime 1631501040 processes 2557351 procs_running 2 procs_blocked 0 softirq 1644603067 0 166540056 208 309152755 8936439 0 1354908 935642970 13 222975718\n""" @staticmethod def create_measurements_expected( instrument: Instrument, ) -> List[Measurement]: return [ Measurement( 6150.29, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "user"}, ), Measurement( 3177.46, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "nice"}, ), Measurement( 5946.01, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "system"}, ), Measurement( 891264.59, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "idle"}, ), 
Measurement( 1296.29, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "iowait"}, ), Measurement( 0.0, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "irq"}, ), Measurement( 8343.46, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "softirq"}, ), Measurement( 421.37, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "guest"}, ), Measurement( 0, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu0", "state": "guest_nice"}, ), Measurement( 5882.32, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "user"}, ), Measurement( 3491.85, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "nice"}, ), Measurement( 6404.92, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "system"}, ), Measurement( 891564.11, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "idle"}, ), Measurement( 1244.85, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "iowait"}, ), Measurement( 0, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "irq"}, ), Measurement( 2410.04, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "softirq"}, ), Measurement( 418.62, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "guest"}, ), Measurement( 0, TEST_TIMESTAMP, instrument=instrument, context=TEST_CONTEXT, attributes={"cpu": "cpu1", "state": "guest_nice"}, ), ] def test_cpu_time_callback(self): def cpu_time_callback( options: CallbackOptions, ) -> Iterable[Observation]: procstat = io.StringIO(self.procstat_str) procstat.readline() # skip the first line for line in procstat: if not line.startswith("cpu"): break cpu, *states = line.split() yield Observation( int(states[0]) / 100, {"cpu": cpu, "state": "user"} ) yield Observation( int(states[1]) / 100, {"cpu": cpu, "state": "nice"} ) yield Observation( int(states[2]) / 100, {"cpu": cpu, "state": "system"} ) yield Observation( int(states[3]) / 100, {"cpu": cpu, "state": "idle"} ) yield Observation( int(states[4]) / 100, {"cpu": cpu, "state": "iowait"} ) yield Observation( int(states[5]) / 100, {"cpu": cpu, "state": "irq"} ) yield Observation( int(states[6]) / 100, {"cpu": cpu, "state": "softirq"} ) yield Observation( int(states[7]) / 100, {"cpu": cpu, "state": "guest"} ) yield Observation( int(states[8]) / 100, {"cpu": cpu, "state": "guest_nice"} ) meter = MeterProvider().get_meter("name") observable_counter = meter.create_observable_counter( "system.cpu.time", callbacks=[cpu_time_callback], unit="s", description="CPU time", ) measurements = list(observable_counter.callback(CallbackOptions())) self.assertEqual( measurements, self.create_measurements_expected(observable_counter) ) def test_cpu_time_generator(self): def cpu_time_generator() -> Generator[ Iterable[Observation], None, None ]: options = yield while True: self.assertIsInstance(options, CallbackOptions) measurements = [] procstat = io.StringIO(self.procstat_str) procstat.readline() # skip the first line for line in procstat: if not line.startswith("cpu"): break cpu, *states = line.split() measurements.append( Observation( int(states[0]) / 
100, {"cpu": cpu, "state": "user"}, ) ) measurements.append( Observation( int(states[1]) / 100, {"cpu": cpu, "state": "nice"}, ) ) measurements.append( Observation( int(states[2]) / 100, {"cpu": cpu, "state": "system"}, ) ) measurements.append( Observation( int(states[3]) / 100, {"cpu": cpu, "state": "idle"}, ) ) measurements.append( Observation( int(states[4]) / 100, {"cpu": cpu, "state": "iowait"}, ) ) measurements.append( Observation( int(states[5]) / 100, {"cpu": cpu, "state": "irq"} ) ) measurements.append( Observation( int(states[6]) / 100, {"cpu": cpu, "state": "softirq"}, ) ) measurements.append( Observation( int(states[7]) / 100, {"cpu": cpu, "state": "guest"}, ) ) measurements.append( Observation( int(states[8]) / 100, {"cpu": cpu, "state": "guest_nice"}, ) ) options = yield measurements meter = MeterProvider().get_meter("name") observable_counter = meter.create_observable_counter( "system.cpu.time", callbacks=[cpu_time_generator()], unit="s", description="CPU time", ) measurements = list(observable_counter.callback(CallbackOptions())) self.assertEqual( measurements, self.create_measurements_expected(observable_counter) ) maxDiff = None test_disable_default_views.py000066400000000000000000000046771511654350100355620ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from unittest import TestCase from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import InMemoryMetricReader from opentelemetry.sdk.metrics.view import DropAggregation, View class TestDisableDefaultViews(TestCase): def test_disable_default_views(self): reader = InMemoryMetricReader() meter_provider = MeterProvider( metric_readers=[reader], views=[View(instrument_name="*", aggregation=DropAggregation())], ) meter = meter_provider.get_meter("testmeter") counter = meter.create_counter("testcounter") counter.add(10, {"label": "value1"}) counter.add(10, {"label": "value2"}) counter.add(10, {"label": "value3"}) self.assertIsNone(reader.get_metrics_data()) def test_disable_default_views_add_custom(self): reader = InMemoryMetricReader() meter_provider = MeterProvider( metric_readers=[reader], views=[ View(instrument_name="*", aggregation=DropAggregation()), View(instrument_name="testhist"), ], ) meter = meter_provider.get_meter("testmeter") counter = meter.create_counter("testcounter") histogram = meter.create_histogram("testhist") counter.add(10, {"label": "value1"}) counter.add(10, {"label": "value2"}) counter.add(10, {"label": "value3"}) histogram.record(12, {"label": "value"}) metrics = reader.get_metrics_data() self.assertEqual(len(metrics.resource_metrics), 1) self.assertEqual(len(metrics.resource_metrics[0].scope_metrics), 1) self.assertEqual( len(metrics.resource_metrics[0].scope_metrics[0].metrics), 1 ) self.assertEqual( metrics.resource_metrics[0].scope_metrics[0].metrics[0].name, "testhist", ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test/test_exemplars.py000066400000000000000000000274161511654350100333110ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os from unittest import TestCase, mock from opentelemetry import trace as trace_api from opentelemetry.sdk.metrics import Exemplar, MeterProvider from opentelemetry.sdk.metrics.export import ( AggregationTemporality, InMemoryMetricReader, Metric, NumberDataPoint, Sum, ) from opentelemetry.trace import SpanContext, TraceFlags class TestExemplars(TestCase): TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16) SPAN_ID = int("6e0c63257de34c92", 16) @mock.patch.dict(os.environ, {"OTEL_METRICS_EXEMPLAR_FILTER": "always_on"}) def test_always_on_exemplars(self): reader = InMemoryMetricReader() meter_provider = MeterProvider( metric_readers=[reader], ) meter = meter_provider.get_meter("testmeter") counter = meter.create_counter("testcounter") counter.add(10, {"label": "value1"}) data = reader.get_metrics_data() metrics = data.resource_metrics[0].scope_metrics[0].metrics self.assertEqual( metrics, [ Metric( name="testcounter", description="", unit="", data=Sum( data_points=[ NumberDataPoint( attributes={"label": "value1"}, start_time_unix_nano=mock.ANY, time_unix_nano=mock.ANY, value=10, exemplars=[ Exemplar( filtered_attributes={}, value=10, time_unix_nano=mock.ANY, span_id=None, trace_id=None, ), ], ) ], aggregation_temporality=AggregationTemporality.CUMULATIVE, is_monotonic=True, ), ) ], ) @mock.patch.dict( os.environ, {"OTEL_METRICS_EXEMPLAR_FILTER": "trace_based"} ) def test_trace_based_exemplars(self): span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), trace_state={}, ) span = trace_api.NonRecordingSpan(span_context) trace_api.set_span_in_context(span) reader = InMemoryMetricReader() meter_provider = MeterProvider( metric_readers=[reader], ) meter = meter_provider.get_meter("testmeter") counter = meter.create_counter("testcounter") with trace_api.use_span(span): counter.add(10, {"label": "value1"}) data = reader.get_metrics_data() metrics = data.resource_metrics[0].scope_metrics[0].metrics self.assertEqual( metrics, [ Metric( name="testcounter", description="", unit="", data=Sum( data_points=[ NumberDataPoint( attributes={"label": "value1"}, start_time_unix_nano=mock.ANY, time_unix_nano=mock.ANY, value=10, exemplars=[ Exemplar( filtered_attributes={}, value=10, time_unix_nano=mock.ANY, span_id=self.SPAN_ID, trace_id=self.TRACE_ID, ), ], ) ], aggregation_temporality=AggregationTemporality.CUMULATIVE, is_monotonic=True, ), ) ], ) def test_default_exemplar_filter_no_span(self): reader = InMemoryMetricReader() meter_provider = MeterProvider( metric_readers=[reader], ) meter = meter_provider.get_meter("testmeter") counter = meter.create_counter("testcounter") counter.add(10, {"label": "value1"}) data = reader.get_metrics_data() metrics = data.resource_metrics[0].scope_metrics[0].metrics self.assertEqual( metrics, [ Metric( name="testcounter", description="", unit="", data=Sum( data_points=[ NumberDataPoint( attributes={"label": "value1"}, start_time_unix_nano=mock.ANY, time_unix_nano=mock.ANY, value=10, exemplars=[], ) ], aggregation_temporality=AggregationTemporality.CUMULATIVE, is_monotonic=True, ), ) ], ) def test_default_exemplar_filter(self): span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), trace_state={}, ) span = trace_api.NonRecordingSpan(span_context) trace_api.set_span_in_context(span) reader = InMemoryMetricReader() meter_provider = MeterProvider( metric_readers=[reader], ) meter = 
meter_provider.get_meter("testmeter") counter = meter.create_counter("testcounter") with trace_api.use_span(span): counter.add(10, {"label": "value1"}) data = reader.get_metrics_data() metrics = data.resource_metrics[0].scope_metrics[0].metrics self.assertEqual( metrics, [ Metric( name="testcounter", description="", unit="", data=Sum( data_points=[ NumberDataPoint( attributes={"label": "value1"}, start_time_unix_nano=mock.ANY, time_unix_nano=mock.ANY, value=10, exemplars=[ Exemplar( filtered_attributes={}, value=10, time_unix_nano=mock.ANY, span_id=self.SPAN_ID, trace_id=self.TRACE_ID, ), ], ) ], aggregation_temporality=AggregationTemporality.CUMULATIVE, is_monotonic=True, ), ) ], ) def test_exemplar_trace_based_manual_context(self): span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), trace_state={}, ) span = trace_api.NonRecordingSpan(span_context) ctx = trace_api.set_span_in_context(span) reader = InMemoryMetricReader() meter_provider = MeterProvider( metric_readers=[reader], ) meter = meter_provider.get_meter("testmeter") counter = meter.create_counter("testcounter") counter.add(10, {"label": "value1"}, context=ctx) data = reader.get_metrics_data() metrics = data.resource_metrics[0].scope_metrics[0].metrics self.assertEqual( metrics, [ Metric( name="testcounter", description="", unit="", data=Sum( data_points=[ NumberDataPoint( attributes={"label": "value1"}, start_time_unix_nano=mock.ANY, time_unix_nano=mock.ANY, value=10, exemplars=[ Exemplar( filtered_attributes={}, value=10, time_unix_nano=mock.ANY, span_id=self.SPAN_ID, trace_id=self.TRACE_ID, ), ], ) ], aggregation_temporality=AggregationTemporality.CUMULATIVE, is_monotonic=True, ), ) ], ) @mock.patch.dict( os.environ, {"OTEL_METRICS_EXEMPLAR_FILTER": "always_off"} ) def test_always_off_exemplars(self): span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), trace_state={}, ) span = trace_api.NonRecordingSpan(span_context) trace_api.set_span_in_context(span) reader = InMemoryMetricReader() meter_provider = MeterProvider( metric_readers=[reader], ) meter = meter_provider.get_meter("testmeter") counter = meter.create_counter("testcounter") with trace_api.use_span(span): counter.add(10, {"label": "value1"}) data = reader.get_metrics_data() metrics = data.resource_metrics[0].scope_metrics[0].metrics self.assertEqual( metrics, [ Metric( name="testcounter", description="", unit="", data=Sum( data_points=[ NumberDataPoint( attributes={"label": "value1"}, start_time_unix_nano=mock.ANY, time_unix_nano=mock.ANY, value=10, exemplars=[], ) ], aggregation_temporality=AggregationTemporality.CUMULATIVE, is_monotonic=True, ), ) ], ) test_explicit_bucket_histogram_aggregation.py000066400000000000000000000203341511654350100410240ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from platform import system from time import sleep from unittest import TestCase from pytest import mark from opentelemetry.sdk.metrics import Histogram, MeterProvider from opentelemetry.sdk.metrics.export import ( AggregationTemporality, InMemoryMetricReader, ) from opentelemetry.sdk.metrics.view import ExplicitBucketHistogramAggregation class TestExplicitBucketHistogramAggregation(TestCase): test_values = [1, 6, 11, 26, 51, 76, 101, 251, 501, 751] @mark.skipif( system() == "Windows", reason=( "Tests fail because Windows time_ns resolution is too low so " "two different time measurements may end up having the exact same" "value." ), ) def test_synchronous_delta_temporality(self): aggregation = ExplicitBucketHistogramAggregation() reader = InMemoryMetricReader( preferred_aggregation={Histogram: aggregation}, preferred_temporality={Histogram: AggregationTemporality.DELTA}, ) provider = MeterProvider(metric_readers=[reader]) meter = provider.get_meter("name", "version") histogram = meter.create_histogram("histogram") results = [] for _ in range(10): results.append(reader.get_metrics_data()) for metrics_data in results: self.assertIsNone(metrics_data) results = [] for test_value in self.test_values: histogram.record(test_value) results.append(reader.get_metrics_data()) metric_data = ( results[0] .resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points[0] ) previous_time_unix_nano = metric_data.time_unix_nano self.assertEqual( metric_data.bucket_counts, (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ) self.assertLess( metric_data.start_time_unix_nano, previous_time_unix_nano, ) self.assertEqual(metric_data.min, self.test_values[0]) self.assertEqual(metric_data.max, self.test_values[0]) self.assertEqual(metric_data.sum, self.test_values[0]) for index, metrics_data in enumerate(results[1:]): metric_data = ( metrics_data.resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points[0] ) self.assertEqual( previous_time_unix_nano, metric_data.start_time_unix_nano ) previous_time_unix_nano = metric_data.time_unix_nano self.assertEqual( metric_data.bucket_counts, # pylint: disable=consider-using-generator tuple( [ 1 if internal_index == index + 2 else 0 for internal_index in range(16) ] ), ) self.assertLess( metric_data.start_time_unix_nano, metric_data.time_unix_nano ) self.assertEqual(metric_data.min, self.test_values[index + 1]) self.assertEqual(metric_data.max, self.test_values[index + 1]) self.assertEqual(metric_data.sum, self.test_values[index + 1]) results = [] for _ in range(10): results.append(reader.get_metrics_data()) for metrics_data in results: self.assertIsNone(metrics_data) results = [] histogram.record(1) results.append(reader.get_metrics_data()) sleep(0.1) results.append(reader.get_metrics_data()) histogram.record(2) results.append(reader.get_metrics_data()) metric_data_0 = ( results[0] .resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points[0] ) metric_data_2 = ( results[2] .resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points[0] ) self.assertIsNone(results[1]) self.assertGreater( metric_data_2.start_time_unix_nano, metric_data_0.time_unix_nano ) provider.shutdown() @mark.skipif( system() != "Linux", reason=( "Tests fail because Windows time_ns resolution is too low so " "two different time measurements may end up having the exact same" "value." 
        ),
    )
    def test_synchronous_cumulative_temporality(self):
        aggregation = ExplicitBucketHistogramAggregation()

        reader = InMemoryMetricReader(
            preferred_aggregation={Histogram: aggregation},
            preferred_temporality={
                Histogram: AggregationTemporality.CUMULATIVE
            },
        )
        provider = MeterProvider(metric_readers=[reader])
        meter = provider.get_meter("name", "version")

        histogram = meter.create_histogram("histogram")

        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        results = []

        for test_value in self.test_values:
            histogram.record(test_value)
            results.append(reader.get_metrics_data())

        start_time_unix_nano = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
            .start_time_unix_nano
        )

        for index, metrics_data in enumerate(results):
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                start_time_unix_nano, metric_data.start_time_unix_nano
            )
            self.assertEqual(
                metric_data.bucket_counts,
                # pylint: disable=consider-using-generator
                tuple(
                    [
                        (
                            0
                            if internal_index < 1
                            or internal_index > index + 1
                            else 1
                        )
                        for internal_index in range(16)
                    ]
                ),
            )
            self.assertEqual(metric_data.min, self.test_values[0])
            self.assertEqual(metric_data.max, self.test_values[index])
            self.assertEqual(
                metric_data.sum, sum(self.test_values[: index + 1])
            )

        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        provider.shutdown()

        start_time_unix_nano = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
            .start_time_unix_nano
        )

        for metrics_data in results:
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                start_time_unix_nano, metric_data.start_time_unix_nano
            )
            self.assertEqual(
                metric_data.bucket_counts,
                (0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0),
            )
            self.assertEqual(metric_data.min, self.test_values[0])
            self.assertEqual(metric_data.max, self.test_values[-1])
            self.assertEqual(metric_data.sum, sum(self.test_values))
test_exponential_bucket_histogram.py000066400000000000000000000256741511654350100371740ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from platform import system
from time import sleep
from unittest import TestCase

from pytest import mark

from opentelemetry.sdk.metrics import Histogram, MeterProvider
from opentelemetry.sdk.metrics.export import (
    AggregationTemporality,
    InMemoryMetricReader,
)
from opentelemetry.sdk.metrics.view import (
    ExponentialBucketHistogramAggregation,
)


class TestExponentialBucketHistogramAggregation(TestCase):
    test_values = [2, 4, 1, 1, 8, 0.5, 0.1, 0.045]

    @mark.skipif(
        system() == "Windows",
        reason=(
            "Tests fail because Windows time_ns resolution is too low so "
            "two different time measurements may end up having the exact "
            "same value."
        ),
    )
    def test_synchronous_delta_temporality(self):
        """
        This test case instantiates an exponential histogram aggregation and
        then uses it to record measurements and get metrics. The order in
        which these actions are taken is relevant to the testing that happens
        here. For this reason, the aggregation is only instantiated once,
        since reinstantiating the aggregation would defeat the purpose of
        this test case.
        """

        aggregation = ExponentialBucketHistogramAggregation()

        reader = InMemoryMetricReader(
            preferred_aggregation={Histogram: aggregation},
            preferred_temporality={Histogram: AggregationTemporality.DELTA},
        )
        provider = MeterProvider(metric_readers=[reader])
        meter = provider.get_meter("name", "version")

        histogram = meter.create_histogram("histogram")

        # The test scenario here is calling collect without calling aggregate
        # ever before.
        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        # The test scenario here is calling aggregate then collect repeatedly.
        results = []

        for test_value in self.test_values:
            histogram.record(test_value)
            results.append(reader.get_metrics_data())

        metric_data = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
        )

        previous_time_unix_nano = metric_data.time_unix_nano

        self.assertEqual(metric_data.positive.bucket_counts, [1])
        self.assertEqual(metric_data.negative.bucket_counts, [0])

        self.assertLess(
            metric_data.start_time_unix_nano,
            previous_time_unix_nano,
        )
        self.assertEqual(metric_data.min, self.test_values[0])
        self.assertEqual(metric_data.max, self.test_values[0])
        self.assertEqual(metric_data.sum, self.test_values[0])

        for index, metrics_data in enumerate(results[1:]):
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                previous_time_unix_nano, metric_data.start_time_unix_nano
            )
            previous_time_unix_nano = metric_data.time_unix_nano
            self.assertEqual(metric_data.positive.bucket_counts, [1])
            self.assertEqual(metric_data.negative.bucket_counts, [0])
            self.assertLess(
                metric_data.start_time_unix_nano, metric_data.time_unix_nano
            )
            self.assertEqual(metric_data.min, self.test_values[index + 1])
            self.assertEqual(metric_data.max, self.test_values[index + 1])
            # Using assertAlmostEqual here because in Python 3.12 float
            # resolution can cause these checks to fail.
            self.assertAlmostEqual(
                metric_data.sum, self.test_values[index + 1]
            )

        # The test scenario here is calling collect without calling aggregate
        # immediately before, but with aggregate having been called at some
        # earlier moment.
        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        # The test scenario here is calling aggregate and collect, waiting
        # for a certain amount of time, calling collect, then calling
        # aggregate and collect again.
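        # With delta temporality the middle collect (which sees no new
        # measurements) is expected to return None, and the point recorded
        # after the pause should open a fresh interval, which is checked
        # below via start_time_unix_nano.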
        results = []

        histogram.record(1)
        results.append(reader.get_metrics_data())

        sleep(0.1)
        results.append(reader.get_metrics_data())

        histogram.record(2)
        results.append(reader.get_metrics_data())

        metric_data_0 = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
        )
        metric_data_2 = (
            results[2]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
        )

        self.assertIsNone(results[1])

        self.assertGreater(
            metric_data_2.start_time_unix_nano, metric_data_0.time_unix_nano
        )

        provider.shutdown()

    @mark.skipif(
        system() == "Windows",
        reason=(
            "Tests fail because Windows time_ns resolution is too low so "
            "two different time measurements may end up having the exact "
            "same value."
        ),
    )
    def test_synchronous_cumulative_temporality(self):
        aggregation = ExponentialBucketHistogramAggregation()

        reader = InMemoryMetricReader(
            preferred_aggregation={Histogram: aggregation},
            preferred_temporality={
                Histogram: AggregationTemporality.CUMULATIVE
            },
        )
        provider = MeterProvider(metric_readers=[reader])
        meter = provider.get_meter("name", "version")

        histogram = meter.create_histogram("histogram")

        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        results = []

        for test_value in self.test_values:
            histogram.record(test_value)
            results.append(reader.get_metrics_data())

        metric_data = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
        )

        start_time_unix_nano = metric_data.start_time_unix_nano

        self.assertLess(
            metric_data.start_time_unix_nano,
            metric_data.time_unix_nano,
        )
        self.assertEqual(metric_data.min, self.test_values[0])
        self.assertEqual(metric_data.max, self.test_values[0])
        self.assertEqual(metric_data.sum, self.test_values[0])

        previous_time_unix_nano = metric_data.time_unix_nano

        for index, metrics_data in enumerate(results[1:]):
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                start_time_unix_nano, metric_data.start_time_unix_nano
            )
            self.assertLess(
                metric_data.start_time_unix_nano,
                metric_data.time_unix_nano,
            )
            self.assertEqual(
                metric_data.min, min(self.test_values[: index + 2])
            )
            self.assertEqual(
                metric_data.max, max(self.test_values[: index + 2])
            )
            self.assertAlmostEqual(
                metric_data.sum, sum(self.test_values[: index + 2])
            )
            self.assertGreater(
                metric_data.time_unix_nano, previous_time_unix_nano
            )

            previous_time_unix_nano = metric_data.time_unix_nano

        self.assertEqual(
            metric_data.positive.bucket_counts,
            [
                1,
                *[0] * 17,
                1,
                *[0] * 36,
                1,
                *[0] * 15,
                2,
                *[0] * 15,
                1,
                *[0] * 15,
                1,
                *[0] * 15,
                1,
                *[0] * 40,
            ],
        )
        self.assertEqual(metric_data.negative.bucket_counts, [0])

        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        provider.shutdown()

        metric_data = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
        )

        start_time_unix_nano = metric_data.start_time_unix_nano

        self.assertLess(
            metric_data.start_time_unix_nano,
            metric_data.time_unix_nano,
        )
        self.assertEqual(metric_data.min, min(self.test_values))
        self.assertEqual(metric_data.max, max(self.test_values))
        self.assertAlmostEqual(metric_data.sum, sum(self.test_values))

        previous_metric_data = metric_data

        for index, metrics_data in enumerate(results[1:]):
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                previous_metric_data.start_time_unix_nano,
                metric_data.start_time_unix_nano,
            )
            self.assertEqual(previous_metric_data.min, metric_data.min)
            self.assertEqual(previous_metric_data.max, metric_data.max)
            self.assertAlmostEqual(previous_metric_data.sum, metric_data.sum)
            self.assertEqual(
                metric_data.positive.bucket_counts,
                [
                    1,
                    *[0] * 17,
                    1,
                    *[0] * 36,
                    1,
                    *[0] * 15,
                    2,
                    *[0] * 15,
                    1,
                    *[0] * 15,
                    1,
                    *[0] * 15,
                    1,
                    *[0] * 40,
                ],
            )
            self.assertEqual(metric_data.negative.bucket_counts, [0])
            self.assertLess(
                previous_metric_data.time_unix_nano,
                metric_data.time_unix_nano,
            )
test_exporter_concurrency.py000066400000000000000000000076471511654350100355140ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
from threading import Lock

from opentelemetry.metrics import CallbackOptions, Observation
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import (
    MetricExporter,
    MetricExportResult,
    MetricsData,
    PeriodicExportingMetricReader,
)
from opentelemetry.test.concurrency_test import ConcurrencyTestBase


class MaxCountExporter(MetricExporter):
    def __init__(self) -> None:
        super().__init__(None, None)
        self._lock = Lock()

        # the number of threads inside of export()
        self.count_in_export = 0

        # the total count of calls to export()
        self.export_count = 0

        # the maximum number of threads in export() ever
        self.max_count_in_export = 0

    def export(
        self,
        metrics_data: MetricsData,
        timeout_millis: float = 10_000,
        **kwargs,
    ) -> MetricExportResult:
        with self._lock:
            self.export_count += 1
            self.count_in_export += 1

        # yield to other threads
        time.sleep(0)

        with self._lock:
            self.max_count_in_export = max(
                self.max_count_in_export, self.count_in_export
            )
            self.count_in_export -= 1

        # return a result so the declared MetricExportResult annotation is
        # honored
        return MetricExportResult.SUCCESS

    def force_flush(self, timeout_millis: float = 10_000) -> bool:
        return True

    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
        pass


class TestExporterConcurrency(ConcurrencyTestBase):
    """
    Tests the requirement that:

    > `Export` will never be called concurrently for the same exporter
    > instance. `Export` can be called again only after the current call
    > returns.

    https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exportbatch

    This test also tests that a thread that calls the
    ``MetricReader.collect`` method using an asynchronous instrument is able
    to perform two actions in the same thread lock space (without it being
    interrupted by another thread):

    1. Consume the measurement produced by the callback associated to the
       asynchronous instrument.
    2. Export the measurement mentioned in the step above.
""" def test_exporter_not_called_concurrently(self): exporter = MaxCountExporter() reader = PeriodicExportingMetricReader( exporter=exporter, export_interval_millis=100_000, ) meter_provider = MeterProvider(metric_readers=[reader]) counter_cb_counter = 0 def counter_cb(options: CallbackOptions): nonlocal counter_cb_counter counter_cb_counter += 1 yield Observation(2) meter_provider.get_meter(__name__).create_observable_counter( "testcounter", callbacks=[counter_cb] ) # call collect from a bunch of threads to try and enter export() concurrently def test_many_threads(): reader.collect() self.run_with_many_threads(test_many_threads, num_threads=100) self.assertEqual(counter_cb_counter, 100) # no thread should be in export() now self.assertEqual(exporter.count_in_export, 0) # should be one call for each thread self.assertEqual(exporter.export_count, 100) # should never have been more than one concurrent call self.assertEqual(exporter.max_count_in_export, 1) test_histogram_advisory_explicit_buckets.py000066400000000000000000000225071511654350100405640ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import TestCase from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics._internal.aggregation import ( _DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES, ) from opentelemetry.sdk.metrics._internal.instrument import Histogram from opentelemetry.sdk.metrics.export import InMemoryMetricReader from opentelemetry.sdk.metrics.view import ( ExplicitBucketHistogramAggregation, View, ) class TestHistogramAdvisory(TestCase): def test_default(self): reader = InMemoryMetricReader() meter_provider = MeterProvider( metric_readers=[reader], ) meter = meter_provider.get_meter("testmeter") histogram = meter.create_histogram( "testhistogram", explicit_bucket_boundaries_advisory=[1.0, 2.0, 3.0], ) histogram.record(1, {"label": "value"}) histogram.record(2, {"label": "value"}) histogram.record(3, {"label": "value"}) metrics = reader.get_metrics_data() self.assertEqual(len(metrics.resource_metrics), 1) self.assertEqual(len(metrics.resource_metrics[0].scope_metrics), 1) self.assertEqual( len(metrics.resource_metrics[0].scope_metrics[0].metrics), 1 ) metric = metrics.resource_metrics[0].scope_metrics[0].metrics[0] self.assertEqual(metric.name, "testhistogram") self.assertEqual( metric.data.data_points[0].explicit_bounds, (1.0, 2.0, 3.0) ) def test_empty_buckets(self): reader = InMemoryMetricReader() meter_provider = MeterProvider( metric_readers=[reader], ) meter = meter_provider.get_meter("testmeter") histogram = meter.create_histogram( "testhistogram", explicit_bucket_boundaries_advisory=[], ) histogram.record(1, {"label": "value"}) histogram.record(2, {"label": "value"}) histogram.record(3, {"label": "value"}) metrics = reader.get_metrics_data() self.assertEqual(len(metrics.resource_metrics), 1) 
        self.assertEqual(len(metrics.resource_metrics[0].scope_metrics), 1)
        self.assertEqual(
            len(metrics.resource_metrics[0].scope_metrics[0].metrics), 1
        )
        metric = metrics.resource_metrics[0].scope_metrics[0].metrics[0]
        self.assertEqual(metric.name, "testhistogram")
        self.assertEqual(metric.data.data_points[0].explicit_bounds, ())

    def test_view_default_aggregation(self):
        reader = InMemoryMetricReader()
        view = View(instrument_name="testhistogram")
        meter_provider = MeterProvider(
            metric_readers=[reader],
            views=[view],
        )
        meter = meter_provider.get_meter("testmeter")
        histogram = meter.create_histogram(
            "testhistogram",
            explicit_bucket_boundaries_advisory=[1.0, 2.0, 3.0],
        )
        histogram.record(1, {"label": "value"})
        histogram.record(2, {"label": "value"})
        histogram.record(3, {"label": "value"})

        metrics = reader.get_metrics_data()
        self.assertEqual(len(metrics.resource_metrics), 1)
        self.assertEqual(len(metrics.resource_metrics[0].scope_metrics), 1)
        self.assertEqual(
            len(metrics.resource_metrics[0].scope_metrics[0].metrics), 1
        )
        metric = metrics.resource_metrics[0].scope_metrics[0].metrics[0]
        self.assertEqual(metric.name, "testhistogram")
        self.assertEqual(
            metric.data.data_points[0].explicit_bounds, (1.0, 2.0, 3.0)
        )

    def test_view_overrides_buckets(self):
        reader = InMemoryMetricReader()
        view = View(
            instrument_name="testhistogram",
            aggregation=ExplicitBucketHistogramAggregation(
                boundaries=[10.0, 100.0, 1000.0]
            ),
        )
        meter_provider = MeterProvider(
            metric_readers=[reader],
            views=[view],
        )
        meter = meter_provider.get_meter("testmeter")
        histogram = meter.create_histogram(
            "testhistogram",
            explicit_bucket_boundaries_advisory=[1.0, 2.0, 3.0],
        )
        histogram.record(1, {"label": "value"})
        histogram.record(2, {"label": "value"})
        histogram.record(3, {"label": "value"})

        metrics = reader.get_metrics_data()
        self.assertEqual(len(metrics.resource_metrics), 1)
        self.assertEqual(len(metrics.resource_metrics[0].scope_metrics), 1)
        self.assertEqual(
            len(metrics.resource_metrics[0].scope_metrics[0].metrics), 1
        )
        metric = metrics.resource_metrics[0].scope_metrics[0].metrics[0]
        self.assertEqual(metric.name, "testhistogram")
        self.assertEqual(
            metric.data.data_points[0].explicit_bounds, (10.0, 100.0, 1000.0)
        )

    def test_explicit_aggregation(self):
        reader = InMemoryMetricReader(
            preferred_aggregation={
                Histogram: ExplicitBucketHistogramAggregation()
            }
        )
        meter_provider = MeterProvider(
            metric_readers=[reader],
        )
        meter = meter_provider.get_meter("testmeter")
        histogram = meter.create_histogram(
            "testhistogram",
            explicit_bucket_boundaries_advisory=[1.0, 2.0, 3.0],
        )
        histogram.record(1, {"label": "value"})
        histogram.record(2, {"label": "value"})
        histogram.record(3, {"label": "value"})

        metrics = reader.get_metrics_data()
        self.assertEqual(len(metrics.resource_metrics), 1)
        self.assertEqual(len(metrics.resource_metrics[0].scope_metrics), 1)
        self.assertEqual(
            len(metrics.resource_metrics[0].scope_metrics[0].metrics), 1
        )
        metric = metrics.resource_metrics[0].scope_metrics[0].metrics[0]
        self.assertEqual(metric.name, "testhistogram")
        self.assertEqual(
            metric.data.data_points[0].explicit_bounds, (1.0, 2.0, 3.0)
        )

    def test_explicit_aggregation_multiple_histograms(self):
        reader = InMemoryMetricReader(
            preferred_aggregation={
                Histogram: ExplicitBucketHistogramAggregation()
            }
        )
        meter_provider = MeterProvider(
            metric_readers=[reader],
        )
        meter = meter_provider.get_meter("testmeter")
        histogram1 = meter.create_histogram(
            "testhistogram1",
            explicit_bucket_boundaries_advisory=[1.0, 2.0, 3.0],
        )
        histogram1.record(1, {"label": "value"})
        histogram1.record(2, {"label": "value"})
{"label": "value"}) histogram1.record(3, {"label": "value"}) histogram2 = meter.create_histogram( "testhistogram2", explicit_bucket_boundaries_advisory=[4.0, 5.0, 6.0], ) histogram2.record(4, {"label": "value"}) histogram2.record(5, {"label": "value"}) histogram2.record(6, {"label": "value"}) metrics = reader.get_metrics_data() self.assertEqual(len(metrics.resource_metrics), 1) self.assertEqual(len(metrics.resource_metrics[0].scope_metrics), 1) self.assertEqual( len(metrics.resource_metrics[0].scope_metrics[0].metrics), 2 ) metric1 = metrics.resource_metrics[0].scope_metrics[0].metrics[0] self.assertEqual(metric1.name, "testhistogram1") self.assertEqual( metric1.data.data_points[0].explicit_bounds, (1.0, 2.0, 3.0) ) metric2 = metrics.resource_metrics[0].scope_metrics[0].metrics[1] self.assertEqual(metric2.name, "testhistogram2") self.assertEqual( metric2.data.data_points[0].explicit_bounds, (4.0, 5.0, 6.0) ) def test_explicit_aggregation_default_boundaries(self): reader = InMemoryMetricReader( preferred_aggregation={ Histogram: ExplicitBucketHistogramAggregation() } ) meter_provider = MeterProvider( metric_readers=[reader], ) meter = meter_provider.get_meter("testmeter") histogram = meter.create_histogram( "testhistogram", ) histogram.record(1, {"label": "value"}) histogram.record(2, {"label": "value"}) histogram.record(3, {"label": "value"}) metrics = reader.get_metrics_data() self.assertEqual(len(metrics.resource_metrics), 1) self.assertEqual(len(metrics.resource_metrics[0].scope_metrics), 1) self.assertEqual( len(metrics.resource_metrics[0].scope_metrics[0].metrics), 1 ) metric = metrics.resource_metrics[0].scope_metrics[0].metrics[0] self.assertEqual(metric.name, "testhistogram") self.assertEqual( metric.data.data_points[0].explicit_bounds, _DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES, ) test_histogram_export.py000066400000000000000000000141631511654350100346230ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from unittest import TestCase

from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics._internal.exemplar import (
    AlwaysOffExemplarFilter,
    AlwaysOnExemplarFilter,
)
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.resources import SERVICE_NAME, Resource


class TestHistogramExport(TestCase):
    def test_histogram_counter_collection(self):
        in_memory_metric_reader = InMemoryMetricReader()
        provider = MeterProvider(
            resource=Resource.create({SERVICE_NAME: "otel-test"}),
            metric_readers=[in_memory_metric_reader],
        )
        meter = provider.get_meter("my-meter")
        histogram = meter.create_histogram("my_histogram")
        counter = meter.create_counter("my_counter")
        histogram.record(5, {"attribute": "value"})
        counter.add(1, {"attribute": "value_counter"})

        metric_data = in_memory_metric_reader.get_metrics_data()

        self.assertEqual(
            len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 2
        )
        self.assertEqual(
            (
                metric_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
                .bucket_counts
            ),
            (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
        )
        self.assertEqual(
            (
                metric_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[1]
                .data.data_points[0]
                .value
            ),
            1,
        )

        metric_data = in_memory_metric_reader.get_metrics_data()

        self.assertEqual(
            len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 2
        )
        self.assertEqual(
            (
                metric_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
                .bucket_counts
            ),
            (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
        )
        self.assertEqual(
            (
                metric_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[1]
                .data.data_points[0]
                .value
            ),
            1,
        )

    def test_histogram_with_exemplars(self):
        in_memory_metric_reader = InMemoryMetricReader()
        provider = MeterProvider(
            resource=Resource.create({SERVICE_NAME: "otel-test"}),
            metric_readers=[in_memory_metric_reader],
            exemplar_filter=AlwaysOnExemplarFilter(),
        )
        meter = provider.get_meter("my-meter")
        histogram = meter.create_histogram("my_histogram")

        histogram.record(
            2, {"attribute": "value1"}
        )  # Should go in the first bucket
        histogram.record(
            7, {"attribute": "value2"}
        )  # Should go in the second bucket
        histogram.record(
            9, {"attribute": "value2"}
        )  # Should also go in the second bucket
        histogram.record(
            15, {"attribute": "value3"}
        )  # Should go in the third bucket

        metric_data = in_memory_metric_reader.get_metrics_data()

        self.assertEqual(
            len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1
        )
        histogram_metric = (
            metric_data.resource_metrics[0].scope_metrics[0].metrics[0]
        )

        self.assertEqual(len(histogram_metric.data.data_points), 3)

        self.assertEqual(
            len(histogram_metric.data.data_points[0].exemplars), 1
        )
        self.assertEqual(
            len(histogram_metric.data.data_points[1].exemplars), 1
        )
        self.assertEqual(
            len(histogram_metric.data.data_points[2].exemplars), 1
        )

        self.assertEqual(histogram_metric.data.data_points[0].sum, 2)
        self.assertEqual(histogram_metric.data.data_points[1].sum, 16)
        self.assertEqual(histogram_metric.data.data_points[2].sum, 15)

        self.assertEqual(
            histogram_metric.data.data_points[0].exemplars[0].value, 2.0
        )
        self.assertEqual(
            histogram_metric.data.data_points[1].exemplars[0].value, 9.0
        )
        self.assertEqual(
            histogram_metric.data.data_points[2].exemplars[0].value, 15.0
        )

    def test_filter_with_exemplars(self):
        in_memory_metric_reader = InMemoryMetricReader()
        provider = MeterProvider(
            resource=Resource.create({SERVICE_NAME: "otel-test"}),
            metric_readers=[in_memory_metric_reader],
            exemplar_filter=AlwaysOffExemplarFilter(),
        )
        meter = provider.get_meter("my-meter")
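        # With AlwaysOffExemplarFilter configured, no measurement should be
        # sampled, so every data point below is expected to carry an empty
        # exemplar list.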
provider.get_meter("my-meter") histogram = meter.create_histogram("my_histogram") histogram.record( 2, {"attribute": "value1"} ) # Should go in the first bucket histogram.record( 7, {"attribute": "value2"} ) # Should go in the second bucket metric_data = in_memory_metric_reader.get_metrics_data() self.assertEqual( len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1 ) histogram_metric = ( metric_data.resource_metrics[0].scope_metrics[0].metrics[0] ) self.assertEqual(len(histogram_metric.data.data_points), 2) self.assertEqual( len(histogram_metric.data.data_points[0].exemplars), 0 ) self.assertEqual( len(histogram_metric.data.data_points[1].exemplars), 0 ) test_provider_shutdown.py000066400000000000000000000051331511654350100350070ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import time import weakref from typing import Sequence from unittest import TestCase from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( Metric, MetricExporter, MetricExportResult, PeriodicExportingMetricReader, ) class FakeMetricsExporter(MetricExporter): def __init__( self, wait=0, preferred_temporality=None, preferred_aggregation=None ): self.wait = wait self.metrics = [] self._shutdown = False super().__init__( preferred_temporality=preferred_temporality, preferred_aggregation=preferred_aggregation, ) def export( self, metrics_data: Sequence[Metric], timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: time.sleep(self.wait) self.metrics.extend(metrics_data) return True def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: self._shutdown = True def force_flush(self, timeout_millis: float = 10_000) -> bool: return True class TestMeterProviderShutdown(TestCase): def test_meter_provider_shutdown_cleans_up_successfully(self): def create_and_shutdown(): exporter = FakeMetricsExporter() exporter_wr = weakref.ref(exporter) reader = PeriodicExportingMetricReader(exporter) reader_wr = weakref.ref(reader) provider = MeterProvider(metric_readers=[reader]) provider_wr = weakref.ref(provider) provider.shutdown() return exporter_wr, reader_wr, provider_wr # When: the provider is shutdown ( exporter_weakref, reader_weakref, provider_weakref, ) = create_and_shutdown() gc.collect() # Then: the provider, exporter and reader should be garbage collected self.assertIsNone(exporter_weakref()) self.assertIsNone(reader_weakref()) self.assertIsNone(provider_weakref()) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test/test_sum_aggregation.py000066400000000000000000000333021511654350100344530ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from itertools import count
from logging import ERROR
from platform import system
from time import sleep
from unittest import TestCase

from pytest import mark

from opentelemetry.context import Context
from opentelemetry.metrics import Observation
from opentelemetry.sdk.metrics import (
    Counter,
    MeterProvider,
    ObservableCounter,
)
from opentelemetry.sdk.metrics._internal.exemplar import (
    AlwaysOnExemplarFilter,
)
from opentelemetry.sdk.metrics.export import (
    AggregationTemporality,
    InMemoryMetricReader,
)
from opentelemetry.sdk.metrics.view import SumAggregation


class TestSumAggregation(TestCase):
    @mark.skipif(
        system() != "Linux",
        reason=(
            "Tests fail because Windows time_ns resolution is too low so "
            "two different time measurements may end up having the exact "
            "same value."
        ),
    )
    def test_asynchronous_delta_temporality(self):
        eight_multiple_generator = count(start=8, step=8)

        counter = 0

        def observable_counter_callback(callback_options):
            nonlocal counter
            counter += 1

            if counter < 11:
                yield

            elif counter < 21:
                yield Observation(next(eight_multiple_generator))

            else:
                yield

        aggregation = SumAggregation()

        reader = InMemoryMetricReader(
            preferred_aggregation={ObservableCounter: aggregation},
            preferred_temporality={
                ObservableCounter: AggregationTemporality.DELTA
            },
        )
        provider = MeterProvider(metric_readers=[reader])
        meter = provider.get_meter("name", "version")

        meter.create_observable_counter(
            "observable_counter", [observable_counter_callback]
        )

        results = []

        for _ in range(10):
            with self.assertLogs(level=ERROR):
                results.append(reader.get_metrics_data())

        self.assertEqual(counter, 10)

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        self.assertEqual(counter, 20)

        previous_time_unix_nano = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
            .time_unix_nano
        )

        self.assertEqual(
            (
                results[0]
                .resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
                .value
            ),
            8,
        )

        self.assertLess(
            (
                results[0]
                .resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
                .start_time_unix_nano
            ),
            previous_time_unix_nano,
        )

        for metrics_data in results[1:]:
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                previous_time_unix_nano, metric_data.start_time_unix_nano
            )
            previous_time_unix_nano = metric_data.time_unix_nano
            self.assertEqual(metric_data.value, 8)
            self.assertLess(
                metric_data.start_time_unix_nano, metric_data.time_unix_nano
            )

        results = []

        for _ in range(10):
            with self.assertLogs(level=ERROR):
                results.append(reader.get_metrics_data())

        self.assertEqual(counter, 30)

        provider.shutdown()

        for metrics_data in results:
            self.assertIsNone(metrics_data)

    @mark.skipif(
        system() != "Linux",
        reason=(
            "Tests fail because Windows time_ns resolution is too low so "
            "two different time measurements may end up having the exact "
            "same value."
        ),
    )
    def test_asynchronous_cumulative_temporality(self):
        eight_multiple_generator = count(start=8, step=8)

        counter = 0

        def observable_counter_callback(callback_options):
            nonlocal counter
            counter += 1

            if counter < 11:
                yield

            elif counter < 21:
                yield Observation(next(eight_multiple_generator))

            else:
                yield

        aggregation = SumAggregation()

        reader = InMemoryMetricReader(
            preferred_aggregation={ObservableCounter: aggregation},
            preferred_temporality={
                ObservableCounter: AggregationTemporality.CUMULATIVE
            },
        )
        provider = MeterProvider(metric_readers=[reader])
        meter = provider.get_meter("name", "version")

        meter.create_observable_counter(
            "observable_counter", [observable_counter_callback]
        )

        results = []

        for _ in range(10):
            with self.assertLogs(level=ERROR):
                results.append(reader.get_metrics_data())

        self.assertEqual(counter, 10)

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        self.assertEqual(counter, 20)

        start_time_unix_nano = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
            .start_time_unix_nano
        )

        for index, metrics_data in enumerate(results):
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                start_time_unix_nano, metric_data.start_time_unix_nano
            )
            self.assertEqual(metric_data.value, 8 * (index + 1))

        results = []

        for _ in range(10):
            with self.assertLogs(level=ERROR):
                results.append(reader.get_metrics_data())

        self.assertEqual(counter, 30)

        provider.shutdown()

        for metrics_data in results:
            self.assertIsNone(metrics_data)

    @mark.skipif(
        system() != "Linux",
        reason=(
            "Tests fail because Windows time_ns resolution is too low so "
            "two different time measurements may end up having the exact "
            "same value."
        ),
    )
    def test_synchronous_delta_temporality(self):
        aggregation = SumAggregation()

        reader = InMemoryMetricReader(
            preferred_aggregation={Counter: aggregation},
            preferred_temporality={Counter: AggregationTemporality.DELTA},
        )
        provider = MeterProvider(metric_readers=[reader])
        meter = provider.get_meter("name", "version")

        counter = meter.create_counter("counter")

        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        results = []

        for _ in range(10):
            counter.add(8)
            results.append(reader.get_metrics_data())

        previous_time_unix_nano = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
            .time_unix_nano
        )

        self.assertEqual(
            (
                results[0]
                .resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
                .value
            ),
            8,
        )

        self.assertLess(
            (
                results[0]
                .resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
                .start_time_unix_nano
            ),
            previous_time_unix_nano,
        )

        for metrics_data in results[1:]:
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                previous_time_unix_nano, metric_data.start_time_unix_nano
            )
            previous_time_unix_nano = metric_data.time_unix_nano
            self.assertEqual(metric_data.value, 8)
            self.assertLess(
                metric_data.start_time_unix_nano, metric_data.time_unix_nano
            )

        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        results = []

        counter.add(1)
        results.append(reader.get_metrics_data())

        sleep(0.1)
        results.append(reader.get_metrics_data())

        counter.add(2)
        results.append(reader.get_metrics_data())

        metric_data_0 = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
        )
        metric_data_2 = (
            results[2]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
        )

        self.assertIsNone(results[1])

        self.assertGreater(
            metric_data_2.start_time_unix_nano, metric_data_0.time_unix_nano
        )

        provider.shutdown()

    @mark.skipif(
        system() != "Linux",
        reason=(
            "Tests fail because Windows time_ns resolution is too low so "
            "two different time measurements may end up having the exact "
            "same value."
        ),
    )
    def test_synchronous_cumulative_temporality(self):
        aggregation = SumAggregation()

        reader = InMemoryMetricReader(
            preferred_aggregation={Counter: aggregation},
            preferred_temporality={Counter: AggregationTemporality.CUMULATIVE},
        )
        provider = MeterProvider(metric_readers=[reader])
        meter = provider.get_meter("name", "version")

        counter = meter.create_counter("counter")

        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        results = []

        for _ in range(10):
            counter.add(8)
            results.append(reader.get_metrics_data())

        start_time_unix_nano = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
            .start_time_unix_nano
        )

        for index, metrics_data in enumerate(results):
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                start_time_unix_nano, metric_data.start_time_unix_nano
            )
            self.assertEqual(metric_data.value, 8 * (index + 1))

        results = []

        for _ in range(10):
            results.append(reader.get_metrics_data())

        provider.shutdown()

        start_time_unix_nano = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
            .start_time_unix_nano
        )

        for metrics_data in results:
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                start_time_unix_nano, metric_data.start_time_unix_nano
            )
            self.assertEqual(metric_data.value, 80)

    def test_sum_aggregation_with_exemplars(self):
        in_memory_metric_reader = InMemoryMetricReader()

        provider = MeterProvider(
            metric_readers=[in_memory_metric_reader],
            exemplar_filter=AlwaysOnExemplarFilter(),
        )

        meter = provider.get_meter("my-meter")
        counter = meter.create_counter("my_counter")

        counter.add(2, {"attribute": "value1"}, context=Context())
        counter.add(5, {"attribute": "value2"}, context=Context())
        counter.add(3, {"attribute": "value3"}, context=Context())

        metric_data = in_memory_metric_reader.get_metrics_data()

        self.assertEqual(
            len(metric_data.resource_metrics[0].scope_metrics[0].metrics), 1
        )

        sum_metric = (
            metric_data.resource_metrics[0].scope_metrics[0].metrics[0]
        )

        data_points = sum_metric.data.data_points
        self.assertEqual(len(data_points), 3)

        self.assertEqual(data_points[0].exemplars[0].value, 2.0)
        self.assertEqual(data_points[1].exemplars[0].value, 5.0)
        self.assertEqual(data_points[2].exemplars[0].value, 3.0)

        provider.shutdown()
python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/integration_test/test_time_align.py000066400000000000000000000230221511654350100334060ustar00rootroot00000000000000
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
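
# The tests below verify collection-time alignment: all data points produced
# by one reader collection are expected to share the same collection
# timestamp, while the start timestamps depend on the configured temporality.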
from platform import system
from time import sleep
from unittest import TestCase

from pytest import mark

from opentelemetry.sdk.metrics import Counter, MeterProvider
from opentelemetry.sdk.metrics.export import (
    AggregationTemporality,
    InMemoryMetricReader,
)


class TestTimeAlign(TestCase):
    # This delay is needed for these tests to pass when they are run in
    # Windows.
    delay = 0.001

    def test_time_align_cumulative(self):
        reader = InMemoryMetricReader()
        meter_provider = MeterProvider(metric_readers=[reader])
        meter = meter_provider.get_meter("testmeter")

        counter_0 = meter.create_counter("counter_0")
        counter_1 = meter.create_counter("counter_1")

        counter_0.add(10, {"label": "value1"})
        sleep(self.delay)
        counter_0.add(10, {"label": "value2"})
        sleep(self.delay)
        counter_1.add(10, {"label": "value1"})
        sleep(self.delay)
        counter_1.add(10, {"label": "value2"})

        metrics = reader.get_metrics_data()

        data_points_0_0 = list(
            metrics.resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points
        )
        data_points_0_1 = list(
            metrics.resource_metrics[0]
            .scope_metrics[0]
            .metrics[1]
            .data.data_points
        )

        self.assertEqual(len(data_points_0_0), 2)
        self.assertEqual(len(data_points_0_1), 2)

        self.assertLess(
            data_points_0_0[0].start_time_unix_nano,
            data_points_0_0[1].start_time_unix_nano,
        )
        self.assertLess(
            data_points_0_1[0].start_time_unix_nano,
            data_points_0_1[1].start_time_unix_nano,
        )
        self.assertNotEqual(
            data_points_0_0[1].start_time_unix_nano,
            data_points_0_1[0].start_time_unix_nano,
        )

        self.assertEqual(
            data_points_0_0[0].time_unix_nano,
            data_points_0_0[1].time_unix_nano,
        )
        self.assertEqual(
            data_points_0_1[0].time_unix_nano,
            data_points_0_1[1].time_unix_nano,
        )
        self.assertEqual(
            data_points_0_0[1].time_unix_nano,
            data_points_0_1[0].time_unix_nano,
        )

        counter_0.add(10, {"label": "value1"})
        sleep(self.delay)
        counter_0.add(10, {"label": "value2"})
        sleep(self.delay)
        counter_1.add(10, {"label": "value1"})
        sleep(self.delay)
        counter_1.add(10, {"label": "value2"})

        metrics = reader.get_metrics_data()

        data_points_1_0 = list(
            metrics.resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points
        )
        data_points_1_1 = list(
            metrics.resource_metrics[0]
            .scope_metrics[0]
            .metrics[1]
            .data.data_points
        )

        self.assertEqual(len(data_points_1_0), 2)
        self.assertEqual(len(data_points_1_1), 2)

        self.assertLess(
            data_points_1_0[0].start_time_unix_nano,
            data_points_1_0[1].start_time_unix_nano,
        )
        self.assertLess(
            data_points_1_1[0].start_time_unix_nano,
            data_points_1_1[1].start_time_unix_nano,
        )
        self.assertNotEqual(
            data_points_1_0[1].start_time_unix_nano,
            data_points_1_1[0].start_time_unix_nano,
        )

        self.assertEqual(
            data_points_1_0[0].time_unix_nano,
            data_points_1_0[1].time_unix_nano,
        )
        self.assertEqual(
            data_points_1_1[0].time_unix_nano,
            data_points_1_1[1].time_unix_nano,
        )
        self.assertEqual(
            data_points_1_0[1].time_unix_nano,
            data_points_1_1[0].time_unix_nano,
        )

        self.assertEqual(
            data_points_0_0[0].start_time_unix_nano,
            data_points_1_0[0].start_time_unix_nano,
        )
        self.assertEqual(
            data_points_0_0[1].start_time_unix_nano,
            data_points_1_0[1].start_time_unix_nano,
        )
        self.assertEqual(
            data_points_0_1[0].start_time_unix_nano,
            data_points_1_1[0].start_time_unix_nano,
        )
        self.assertEqual(
            data_points_0_1[1].start_time_unix_nano,
            data_points_1_1[1].start_time_unix_nano,
        )

    @mark.skipif(
        system() != "Linux", reason="test failing in CI when run in Windows"
    )
    def test_time_align_delta(self):
        reader = InMemoryMetricReader(
            preferred_temporality={Counter: AggregationTemporality.DELTA}
        )
        meter_provider = MeterProvider(metric_readers=[reader])
        meter = meter_provider.get_meter("testmeter")
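        # Same sequence of measurements as the cumulative test; with DELTA
        # temporality each collection closes the interval, so the second
        # cycle below should start where the first one ended.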
= meter_provider.get_meter("testmeter") counter_0 = meter.create_counter("counter_0") counter_1 = meter.create_counter("counter_1") counter_0.add(10, {"label": "value1"}) sleep(self.delay) counter_0.add(10, {"label": "value2"}) sleep(self.delay) counter_1.add(10, {"label": "value1"}) sleep(self.delay) counter_1.add(10, {"label": "value2"}) metrics = reader.get_metrics_data() data_points_0_0 = list( metrics.resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points ) data_points_0_1 = list( metrics.resource_metrics[0] .scope_metrics[0] .metrics[1] .data.data_points ) self.assertEqual(len(data_points_0_0), 2) self.assertEqual(len(data_points_0_1), 2) self.assertLess( data_points_0_0[0].start_time_unix_nano, data_points_0_0[1].start_time_unix_nano, ) self.assertLess( data_points_0_1[0].start_time_unix_nano, data_points_0_1[1].start_time_unix_nano, ) self.assertNotEqual( data_points_0_0[1].start_time_unix_nano, data_points_0_1[0].start_time_unix_nano, ) self.assertEqual( data_points_0_0[0].time_unix_nano, data_points_0_0[1].time_unix_nano, ) self.assertEqual( data_points_0_1[0].time_unix_nano, data_points_0_1[1].time_unix_nano, ) self.assertEqual( data_points_0_0[1].time_unix_nano, data_points_0_1[0].time_unix_nano, ) counter_0.add(10, {"label": "value1"}) sleep(self.delay) counter_0.add(10, {"label": "value2"}) sleep(self.delay) counter_1.add(10, {"label": "value1"}) sleep(self.delay) counter_1.add(10, {"label": "value2"}) metrics = reader.get_metrics_data() data_points_1_0 = list( metrics.resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points ) data_points_1_1 = list( metrics.resource_metrics[0] .scope_metrics[0] .metrics[1] .data.data_points ) self.assertEqual(len(data_points_1_0), 2) self.assertEqual(len(data_points_1_1), 2) self.assertEqual( data_points_1_0[0].start_time_unix_nano, data_points_1_0[1].start_time_unix_nano, ) self.assertEqual( data_points_1_1[0].start_time_unix_nano, data_points_1_1[1].start_time_unix_nano, ) self.assertEqual( data_points_1_0[1].start_time_unix_nano, data_points_1_1[0].start_time_unix_nano, ) self.assertEqual( data_points_1_0[0].time_unix_nano, data_points_1_0[1].time_unix_nano, ) self.assertEqual( data_points_1_1[0].time_unix_nano, data_points_1_1[1].time_unix_nano, ) self.assertEqual( data_points_1_0[1].time_unix_nano, data_points_1_1[0].time_unix_nano, ) self.assertNotEqual( data_points_0_0[0].start_time_unix_nano, data_points_1_0[0].start_time_unix_nano, ) self.assertNotEqual( data_points_0_0[1].start_time_unix_nano, data_points_1_0[1].start_time_unix_nano, ) self.assertNotEqual( data_points_0_1[0].start_time_unix_nano, data_points_1_1[0].start_time_unix_nano, ) self.assertNotEqual( data_points_0_1[1].start_time_unix_nano, data_points_1_1[1].start_time_unix_nano, ) self.assertEqual( data_points_0_0[0].time_unix_nano, data_points_1_0[0].start_time_unix_nano, ) self.assertEqual( data_points_0_0[1].time_unix_nano, data_points_1_0[1].start_time_unix_nano, ) self.assertEqual( data_points_0_1[0].time_unix_nano, data_points_1_1[0].start_time_unix_nano, ) self.assertEqual( data_points_0_1[1].time_unix_nano, data_points_1_1[1].start_time_unix_nano, ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_aggregation.py000066400000000000000000000632231511654350100302120ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=protected-access

from math import inf
from time import sleep, time_ns
from typing import Union
from unittest import TestCase
from unittest.mock import Mock

from opentelemetry.context import Context
from opentelemetry.sdk.metrics._internal.aggregation import (
    _ExplicitBucketHistogramAggregation,
    _LastValueAggregation,
    _SumAggregation,
)
from opentelemetry.sdk.metrics._internal.exemplar import (
    AlignedHistogramBucketExemplarReservoir,
    SimpleFixedSizeExemplarReservoir,
)
from opentelemetry.sdk.metrics._internal.instrument import (
    _Counter,
    _Gauge,
    _Histogram,
    _ObservableCounter,
    _ObservableGauge,
    _ObservableUpDownCounter,
    _UpDownCounter,
)
from opentelemetry.sdk.metrics._internal.measurement import Measurement
from opentelemetry.sdk.metrics._internal.view import (
    _default_reservoir_factory,
)
from opentelemetry.sdk.metrics.export import (
    AggregationTemporality,
    NumberDataPoint,
)
from opentelemetry.sdk.metrics.view import (
    DefaultAggregation,
    ExplicitBucketHistogramAggregation,
    LastValueAggregation,
    SumAggregation,
)
from opentelemetry.util.types import Attributes


def measurement(
    value: Union[int, float], attributes: Attributes = None
) -> Measurement:
    return Measurement(
        value,
        time_ns(),
        instrument=Mock(),
        context=Context(),
        attributes=attributes,
    )


class TestSynchronousSumAggregation(TestCase):
    def test_aggregate_delta(self):
        """
        `SynchronousSumAggregation` aggregates data for sum metric points
        """

        synchronous_sum_aggregation = _SumAggregation(
            Mock(),
            True,
            AggregationTemporality.DELTA,
            0,
            _default_reservoir_factory(_SumAggregation),
        )

        synchronous_sum_aggregation.aggregate(measurement(1))
        synchronous_sum_aggregation.aggregate(measurement(2))
        synchronous_sum_aggregation.aggregate(measurement(3))

        self.assertEqual(synchronous_sum_aggregation._value, 6)

        synchronous_sum_aggregation = _SumAggregation(
            Mock(),
            True,
            AggregationTemporality.DELTA,
            0,
            _default_reservoir_factory(_SumAggregation),
        )

        synchronous_sum_aggregation.aggregate(measurement(1))
        synchronous_sum_aggregation.aggregate(measurement(-2))
        synchronous_sum_aggregation.aggregate(measurement(3))

        self.assertEqual(synchronous_sum_aggregation._value, 2)

    def test_aggregate_cumulative(self):
        """
        `SynchronousSumAggregation` aggregates data for sum metric points
        """

        synchronous_sum_aggregation = _SumAggregation(
            Mock(),
            True,
            AggregationTemporality.CUMULATIVE,
            0,
            _default_reservoir_factory(_SumAggregation),
        )

        synchronous_sum_aggregation.aggregate(measurement(1))
        synchronous_sum_aggregation.aggregate(measurement(2))
        synchronous_sum_aggregation.aggregate(measurement(3))

        self.assertEqual(synchronous_sum_aggregation._value, 6)

        synchronous_sum_aggregation = _SumAggregation(
            Mock(),
            True,
            AggregationTemporality.CUMULATIVE,
            0,
            _default_reservoir_factory(_SumAggregation),
        )

        synchronous_sum_aggregation.aggregate(measurement(1))
        synchronous_sum_aggregation.aggregate(measurement(-2))
        synchronous_sum_aggregation.aggregate(measurement(3))

        self.assertEqual(synchronous_sum_aggregation._value, 2)

    def test_collect_delta(self):
        """
        `SynchronousSumAggregation` collects sum metric points
        """

        synchronous_sum_aggregation = _SumAggregation(
            Mock(),
            True,
            AggregationTemporality.DELTA,
            0,
            _default_reservoir_factory(_SumAggregation),
        )

        synchronous_sum_aggregation.aggregate(measurement(1))
        # 1 is used here directly to simulate the instant the first
        # collection process starts.
        first_sum = synchronous_sum_aggregation.collect(
            AggregationTemporality.CUMULATIVE, 1
        )

        self.assertEqual(first_sum.value, 1)

        synchronous_sum_aggregation.aggregate(measurement(1))
        # 2 is used here directly to simulate the instant the second
        # collection process starts.
        second_sum = synchronous_sum_aggregation.collect(
            AggregationTemporality.CUMULATIVE, 2
        )

        self.assertEqual(second_sum.value, 2)

        self.assertEqual(
            second_sum.start_time_unix_nano, first_sum.start_time_unix_nano
        )

        synchronous_sum_aggregation = _SumAggregation(
            Mock(),
            True,
            AggregationTemporality.DELTA,
            0,
            _default_reservoir_factory(_SumAggregation),
        )

        synchronous_sum_aggregation.aggregate(measurement(1))
        # 1 is used here directly to simulate the instant the first
        # collection process starts.
        first_sum = synchronous_sum_aggregation.collect(
            AggregationTemporality.DELTA, 1
        )

        self.assertEqual(first_sum.value, 1)

        synchronous_sum_aggregation.aggregate(measurement(1))
        # 2 is used here directly to simulate the instant the second
        # collection process starts.
        second_sum = synchronous_sum_aggregation.collect(
            AggregationTemporality.DELTA, 2
        )

        self.assertEqual(second_sum.value, 1)

        self.assertGreater(
            second_sum.start_time_unix_nano, first_sum.start_time_unix_nano
        )

    def test_collect_cumulative(self):
        """
        `SynchronousSumAggregation` collects number data points
        """

        sum_aggregation = _SumAggregation(
            Mock(),
            True,
            AggregationTemporality.CUMULATIVE,
            0,
            _default_reservoir_factory(_SumAggregation),
        )

        sum_aggregation.aggregate(measurement(1))
        first_sum = sum_aggregation.collect(
            AggregationTemporality.CUMULATIVE, 1
        )

        self.assertEqual(first_sum.value, 1)

        # should have been reset after first collect
        sum_aggregation.aggregate(measurement(1))
        second_sum = sum_aggregation.collect(
            AggregationTemporality.CUMULATIVE, 1
        )

        self.assertEqual(second_sum.value, 1)

        self.assertEqual(
            second_sum.start_time_unix_nano, first_sum.start_time_unix_nano
        )

        # if no point seen for a whole interval, should return None
        third_sum = sum_aggregation.collect(
            AggregationTemporality.CUMULATIVE, 1
        )
        self.assertIsNone(third_sum)


class TestLastValueAggregation(TestCase):
    def test_aggregate(self):
        """
        `LastValueAggregation` collects data for gauge metric points with
        delta temporality
        """

        last_value_aggregation = _LastValueAggregation(
            Mock(), _default_reservoir_factory(_LastValueAggregation)
        )

        last_value_aggregation.aggregate(measurement(1))
        self.assertEqual(last_value_aggregation._value, 1)

        last_value_aggregation.aggregate(measurement(2))
        self.assertEqual(last_value_aggregation._value, 2)

        last_value_aggregation.aggregate(measurement(3))
        self.assertEqual(last_value_aggregation._value, 3)

    def test_collect(self):
        """
        `LastValueAggregation` collects number data points
        """

        last_value_aggregation = _LastValueAggregation(
            Mock(), _default_reservoir_factory(_LastValueAggregation)
        )

        self.assertIsNone(
            last_value_aggregation.collect(
                AggregationTemporality.CUMULATIVE, 1
            )
        )

        last_value_aggregation.aggregate(measurement(1))
        # 1 is used here directly to simulate the instant the first
        # collection process starts.
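        # Last-value (gauge) points carry no aggregation interval, so the
        # collected data points below are expected to have
        # start_time_unix_nano set to None.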
        first_number_data_point = last_value_aggregation.collect(
            AggregationTemporality.CUMULATIVE, 1
        )
        self.assertIsInstance(first_number_data_point, NumberDataPoint)

        self.assertEqual(first_number_data_point.value, 1)
        self.assertIsNone(first_number_data_point.start_time_unix_nano)

        last_value_aggregation.aggregate(measurement(1))

        # CI fails the last assertion without this
        sleep(0.1)

        # 2 is used here directly to simulate the instant the second
        # collection process starts.
        second_number_data_point = last_value_aggregation.collect(
            AggregationTemporality.CUMULATIVE, 2
        )

        self.assertEqual(second_number_data_point.value, 1)
        self.assertIsNone(second_number_data_point.start_time_unix_nano)

        self.assertGreater(
            second_number_data_point.time_unix_nano,
            first_number_data_point.time_unix_nano,
        )

        # 3 is used here directly to simulate the instant the third
        # collection process starts.
        third_number_data_point = last_value_aggregation.collect(
            AggregationTemporality.CUMULATIVE, 3
        )
        self.assertIsNone(third_number_data_point)


class TestExplicitBucketHistogramAggregation(TestCase):
    def test_aggregate(self):
        """
        Test `ExplicitBucketHistogramAggregation with custom boundaries
        """

        explicit_bucket_histogram_aggregation = (
            _ExplicitBucketHistogramAggregation(
                Mock(),
                AggregationTemporality.DELTA,
                0,
                _default_reservoir_factory(
                    _ExplicitBucketHistogramAggregation
                ),
                boundaries=[0, 2, 4],
            )
        )

        explicit_bucket_histogram_aggregation.aggregate(measurement(-1))
        explicit_bucket_histogram_aggregation.aggregate(measurement(0))
        explicit_bucket_histogram_aggregation.aggregate(measurement(1))
        explicit_bucket_histogram_aggregation.aggregate(measurement(2))
        explicit_bucket_histogram_aggregation.aggregate(measurement(3))
        explicit_bucket_histogram_aggregation.aggregate(measurement(4))
        explicit_bucket_histogram_aggregation.aggregate(measurement(5))

        # The first bucket keeps count of values between (-inf, 0] (-1 and 0)
        self.assertEqual(explicit_bucket_histogram_aggregation._value[0], 2)

        # The second bucket keeps count of values between (0, 2] (1 and 2)
        self.assertEqual(explicit_bucket_histogram_aggregation._value[1], 2)

        # The third bucket keeps count of values between (2, 4] (3 and 4)
        self.assertEqual(explicit_bucket_histogram_aggregation._value[2], 2)

        # The fourth bucket keeps count of values between (4, inf) (5)
        self.assertEqual(explicit_bucket_histogram_aggregation._value[3], 1)

        histo = explicit_bucket_histogram_aggregation.collect(
            AggregationTemporality.CUMULATIVE, 1
        )
        self.assertEqual(histo.sum, 14)

    def test_min_max(self):
        """
        `record_min_max` indicates the aggregator to record the minimum and
        maximum value in the population
        """

        explicit_bucket_histogram_aggregation = (
            _ExplicitBucketHistogramAggregation(
                Mock(),
                AggregationTemporality.CUMULATIVE,
                0,
                _default_reservoir_factory(
                    _ExplicitBucketHistogramAggregation
                ),
            )
        )

        explicit_bucket_histogram_aggregation.aggregate(measurement(-1))
        explicit_bucket_histogram_aggregation.aggregate(measurement(2))
        explicit_bucket_histogram_aggregation.aggregate(measurement(7))
        explicit_bucket_histogram_aggregation.aggregate(measurement(8))
        explicit_bucket_histogram_aggregation.aggregate(measurement(9999))

        self.assertEqual(explicit_bucket_histogram_aggregation._min, -1)
        self.assertEqual(explicit_bucket_histogram_aggregation._max, 9999)

        explicit_bucket_histogram_aggregation = (
            _ExplicitBucketHistogramAggregation(
                Mock(),
                AggregationTemporality.CUMULATIVE,
                0,
                _default_reservoir_factory(
                    _ExplicitBucketHistogramAggregation
                ),
                record_min_max=False,
            )
        )
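        # With record_min_max disabled, _min and _max should keep their
        # initial sentinel values (inf and -inf respectively), as asserted
        # below.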
explicit_bucket_histogram_aggregation.aggregate(measurement(-1)) explicit_bucket_histogram_aggregation.aggregate(measurement(2)) explicit_bucket_histogram_aggregation.aggregate(measurement(7)) explicit_bucket_histogram_aggregation.aggregate(measurement(8)) explicit_bucket_histogram_aggregation.aggregate(measurement(9999)) self.assertEqual(explicit_bucket_histogram_aggregation._min, inf) self.assertEqual(explicit_bucket_histogram_aggregation._max, -inf) def test_collect(self): """ `_ExplicitBucketHistogramAggregation` collects histogram data points """ explicit_bucket_histogram_aggregation = ( _ExplicitBucketHistogramAggregation( Mock(), AggregationTemporality.DELTA, 0, _default_reservoir_factory( _ExplicitBucketHistogramAggregation ), boundaries=[0, 1, 2], ) ) explicit_bucket_histogram_aggregation.aggregate(measurement(1)) # 1 is used here directly to simulate the instant the first # collection process starts. first_histogram = explicit_bucket_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 1 ) self.assertEqual(first_histogram.bucket_counts, (0, 1, 0, 0)) self.assertEqual(first_histogram.sum, 1) # CI fails the last assertion without this sleep(0.1) explicit_bucket_histogram_aggregation.aggregate(measurement(1)) # 2 is used here directly to simulate the instant the second # collection process starts. second_histogram = explicit_bucket_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 2 ) self.assertEqual(second_histogram.bucket_counts, (0, 2, 0, 0)) self.assertEqual(second_histogram.sum, 2) self.assertGreater( second_histogram.time_unix_nano, first_histogram.time_unix_nano ) def test_boundaries(self): self.assertEqual( _ExplicitBucketHistogramAggregation( Mock(), AggregationTemporality.CUMULATIVE, 0, _default_reservoir_factory( _ExplicitBucketHistogramAggregation ), )._boundaries, ( 0.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 2500.0, 5000.0, 7500.0, 10000.0, ), ) class TestAggregationFactory(TestCase): def test_sum_factory(self): counter = _Counter("name", Mock(), Mock()) factory = SumAggregation() aggregation = factory._create_aggregation( counter, Mock(), _default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _SumAggregation) self.assertTrue(aggregation._instrument_is_monotonic) self.assertEqual( aggregation._instrument_aggregation_temporality, AggregationTemporality.DELTA, ) aggregation2 = factory._create_aggregation( counter, Mock(), _default_reservoir_factory, 0 ) self.assertNotEqual(aggregation, aggregation2) counter = _UpDownCounter("name", Mock(), Mock()) factory = SumAggregation() aggregation = factory._create_aggregation( counter, Mock(), _default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _SumAggregation) self.assertFalse(aggregation._instrument_is_monotonic) self.assertEqual( aggregation._instrument_aggregation_temporality, AggregationTemporality.DELTA, ) counter = _ObservableCounter("name", Mock(), Mock(), None) factory = SumAggregation() aggregation = factory._create_aggregation( counter, Mock(), _default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _SumAggregation) self.assertTrue(aggregation._instrument_is_monotonic) self.assertEqual( aggregation._instrument_aggregation_temporality, AggregationTemporality.CUMULATIVE, ) def test_explicit_bucket_histogram_factory(self): histo = _Histogram("name", Mock(), Mock()) factory = ExplicitBucketHistogramAggregation( boundaries=( 0.0, 5.0, ), record_min_max=False, ) aggregation = factory._create_aggregation( histo, Mock(),
_default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _ExplicitBucketHistogramAggregation) self.assertFalse(aggregation._record_min_max) self.assertEqual(aggregation._boundaries, (0.0, 5.0)) aggregation2 = factory._create_aggregation( histo, Mock(), _default_reservoir_factory, 0 ) self.assertNotEqual(aggregation, aggregation2) def test_last_value_factory(self): counter = _Counter("name", Mock(), Mock()) factory = LastValueAggregation() aggregation = factory._create_aggregation( counter, Mock(), _default_reservoir_factory, 0 ) self.assertIsInstance(aggregation, _LastValueAggregation) aggregation2 = factory._create_aggregation( counter, Mock(), _default_reservoir_factory, 0 ) self.assertNotEqual(aggregation, aggregation2) class TestDefaultAggregation(TestCase): @classmethod def setUpClass(cls): cls.default_aggregation = DefaultAggregation() def test_counter(self): aggregation = self.default_aggregation._create_aggregation( _Counter("name", Mock(), Mock()), Mock(), _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _SumAggregation) self.assertTrue(aggregation._instrument_is_monotonic) self.assertEqual( aggregation._instrument_aggregation_temporality, AggregationTemporality.DELTA, ) def test_up_down_counter(self): aggregation = self.default_aggregation._create_aggregation( _UpDownCounter("name", Mock(), Mock()), Mock(), _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _SumAggregation) self.assertFalse(aggregation._instrument_is_monotonic) self.assertEqual( aggregation._instrument_aggregation_temporality, AggregationTemporality.DELTA, ) def test_observable_counter(self): aggregation = self.default_aggregation._create_aggregation( _ObservableCounter("name", Mock(), Mock(), callbacks=[Mock()]), Mock(), _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _SumAggregation) self.assertTrue(aggregation._instrument_is_monotonic) self.assertEqual( aggregation._instrument_aggregation_temporality, AggregationTemporality.CUMULATIVE, ) def test_observable_up_down_counter(self): aggregation = self.default_aggregation._create_aggregation( _ObservableUpDownCounter( "name", Mock(), Mock(), callbacks=[Mock()] ), Mock(), _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _SumAggregation) self.assertFalse(aggregation._instrument_is_monotonic) self.assertEqual( aggregation._instrument_aggregation_temporality, AggregationTemporality.CUMULATIVE, ) def test_histogram(self): aggregation = self.default_aggregation._create_aggregation( _Histogram( "name", Mock(), Mock(), ), Mock(), _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _ExplicitBucketHistogramAggregation) def test_histogram_with_advisory(self): boundaries = [1.0, 2.0, 3.0] aggregation = self.default_aggregation._create_aggregation( _Histogram( "name", Mock(), Mock(), explicit_bucket_boundaries_advisory=boundaries, ), Mock(), _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _ExplicitBucketHistogramAggregation) self.assertEqual(aggregation._boundaries, tuple(boundaries)) def test_gauge(self): aggregation = self.default_aggregation._create_aggregation( _Gauge( "name", Mock(), Mock(), ), Mock(), _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _LastValueAggregation) def test_observable_gauge(self): aggregation = self.default_aggregation._create_aggregation( _ObservableGauge( "name", Mock(), Mock(), callbacks=[Mock()], ), Mock(), _default_reservoir_factory, 0, ) self.assertIsInstance(aggregation, _LastValueAggregation) class 
TestExemplarsFromAggregations(TestCase): def test_collection_simple_fixed_size_reservoir(self): synchronous_sum_aggregation = _SumAggregation( Mock(), True, AggregationTemporality.DELTA, 0, lambda: SimpleFixedSizeExemplarReservoir(size=3), ) synchronous_sum_aggregation.aggregate(measurement(1)) synchronous_sum_aggregation.aggregate(measurement(2)) synchronous_sum_aggregation.aggregate(measurement(3)) self.assertEqual(synchronous_sum_aggregation._value, 6) datapoint = synchronous_sum_aggregation.collect( AggregationTemporality.CUMULATIVE, 0 ) # As the reservoir has multiple buckets, it may store up to # 3 exemplars self.assertGreater(len(datapoint.exemplars), 0) self.assertLessEqual(len(datapoint.exemplars), 3) def test_collection_simple_fixed_size_reservoir_with_default_reservoir( self, ): synchronous_sum_aggregation = _SumAggregation( Mock(), True, AggregationTemporality.DELTA, 0, _default_reservoir_factory(_SumAggregation), ) synchronous_sum_aggregation.aggregate(measurement(1)) synchronous_sum_aggregation.aggregate(measurement(2)) synchronous_sum_aggregation.aggregate(measurement(3)) self.assertEqual(synchronous_sum_aggregation._value, 6) datapoint = synchronous_sum_aggregation.collect( AggregationTemporality.CUMULATIVE, 0 ) self.assertEqual(len(datapoint.exemplars), 1) def test_collection_aligned_histogram_bucket_reservoir(self): boundaries = [5.0, 10.0, 20.0] synchronous_sum_aggregation = _SumAggregation( Mock(), True, AggregationTemporality.DELTA, 0, lambda: AlignedHistogramBucketExemplarReservoir(boundaries), ) synchronous_sum_aggregation.aggregate(measurement(2.0)) synchronous_sum_aggregation.aggregate(measurement(4.0)) synchronous_sum_aggregation.aggregate(measurement(6.0)) synchronous_sum_aggregation.aggregate(measurement(15.0)) synchronous_sum_aggregation.aggregate(measurement(25.0)) datapoint = synchronous_sum_aggregation.collect( AggregationTemporality.CUMULATIVE, 0 ) self.assertEqual(len(datapoint.exemplars), 4) # Verify that exemplars are associated with the correct boundaries expected_buckets = [ ( 4.0, boundaries[0], ), # First bucket, should hold the last value <= 5.0 ( 6.0, boundaries[1], ), # Second bucket, should hold the last value <= 10.0 ( 15.0, boundaries[2], ), # Third bucket, should hold the last value <= 20.0 (25.0, None), # Last bucket, should hold the value > 20.0 ] for exemplar, (value, boundary) in zip( datapoint.exemplars, expected_buckets ): self.assertEqual(exemplar.value, value) if boundary is not None: self.assertLessEqual(exemplar.value, boundary) else: self.assertGreater(exemplar.value, boundaries[-1]) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_backward_compat.py000066400000000000000000000074131511654350100310430ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The purpose of this test is to test for backward compatibility with any user-implementable interfaces as they were originally defined.
For example, changes to the MetricExporter ABC must be made in such a way that existing implementations (outside of this repo) continue to work when *called* by the SDK. This does not apply to classes that are not intended to be overridden by the user, e.g. the Meter and PeriodicExportingMetricReader concrete classes. Those may freely be modified in a backward-compatible way for *callers*. Ideally, we could use pyright for this as well, but the SDK is not type checked at the moment. """ from typing import Iterable, Sequence from opentelemetry.metrics import CallbackOptions, Observation from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics._internal.export import InMemoryMetricReader from opentelemetry.sdk.metrics.export import ( Metric, MetricExporter, MetricExportResult, MetricReader, PeriodicExportingMetricReader, ) from opentelemetry.test import TestCase # Do not change these classes until after major version 1 class OrigMetricExporter(MetricExporter): def export( self, metrics_data: Sequence[Metric], timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: pass def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: pass def force_flush(self, timeout_millis: float = 10_000) -> bool: return True class OrigMetricReader(MetricReader): def _receive_metrics( self, metrics_data: Iterable[Metric], timeout_millis: float = 10_000, **kwargs, ) -> None: pass def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: self.collect() def orig_callback(options: CallbackOptions) -> Iterable[Observation]: yield Observation(2) class TestBackwardCompat(TestCase): def test_metric_exporter(self): exporter = OrigMetricExporter() meter_provider = MeterProvider( metric_readers=[PeriodicExportingMetricReader(exporter)] ) # produce some data meter_provider.get_meter("foo").create_counter("mycounter").add(12) with self.assertNotRaises(Exception): meter_provider.shutdown() def test_metric_reader(self): reader = OrigMetricReader() meter_provider = MeterProvider(metric_readers=[reader]) # produce some data meter_provider.get_meter("foo").create_counter("mycounter").add(12) with self.assertNotRaises(Exception): meter_provider.shutdown() def test_observable_callback(self): reader = InMemoryMetricReader() meter_provider = MeterProvider(metric_readers=[reader]) # produce some data meter_provider.get_meter("foo").create_counter("mycounter").add(12) with self.assertNotRaises(Exception): metrics_data = reader.get_metrics_data() self.assertEqual(len(metrics_data.resource_metrics), 1) self.assertEqual( len(metrics_data.resource_metrics[0].scope_metrics), 1 ) self.assertEqual( len(metrics_data.resource_metrics[0].scope_metrics[0].metrics), 1 ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_exemplarfilter.py000066400000000000000000000037501511654350100307450ustar00rootroot00000000000000from unittest import TestCase from opentelemetry import trace from opentelemetry.context import Context from opentelemetry.sdk.metrics._internal.exemplar import ( AlwaysOffExemplarFilter, AlwaysOnExemplarFilter, TraceBasedExemplarFilter, ) from opentelemetry.trace import TraceFlags from opentelemetry.trace.span import SpanContext class TestAlwaysOnExemplarFilter(TestCase): def test_should_sample(self): filter = AlwaysOnExemplarFilter() self.assertTrue(filter.should_sample(10, 0, {}, Context())) class TestAlwaysOffExemplarFilter(TestCase): def test_should_sample(self): filter = AlwaysOffExemplarFilter() self.assertFalse(filter.should_sample(10, 0, {}, Context())) class
TestTraceBasedExemplarFilter(TestCase): TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16) SPAN_ID = int("6e0c63257de34c92", 16) def test_should_not_sample_without_trace(self): filter = TraceBasedExemplarFilter() span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.DEFAULT), trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) self.assertFalse(filter.should_sample(10, 0, {}, ctx)) def test_should_not_sample_with_invalid_span(self): filter = TraceBasedExemplarFilter() self.assertFalse(filter.should_sample(10, 0, {}, Context())) def test_should_sample_when_trace_is_sampled(self): filter = TraceBasedExemplarFilter() span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) self.assertTrue(filter.should_sample(10, 0, {}, ctx)) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_exemplarreservoir.py000066400000000000000000000141201511654350100314710ustar00rootroot00000000000000from time import time_ns from unittest import TestCase from opentelemetry import trace from opentelemetry.context import Context from opentelemetry.sdk.metrics._internal.aggregation import ( _ExplicitBucketHistogramAggregation, _LastValueAggregation, _SumAggregation, ) from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, SimpleFixedSizeExemplarReservoir, ) from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory from opentelemetry.trace import SpanContext, TraceFlags class TestSimpleFixedSizeExemplarReservoir(TestCase): TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16) SPAN_ID = int("6e0c63257de34c92", 16) def test_no_measurements(self): reservoir = SimpleFixedSizeExemplarReservoir(10) self.assertEqual(len(reservoir.collect({})), 0) def test_has_context(self): reservoir = SimpleFixedSizeExemplarReservoir(1) span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) reservoir.offer(1, time_ns(), {}, ctx) exemplars = reservoir.collect({}) self.assertEqual(len(exemplars), 1) self.assertEqual(exemplars[0].trace_id, self.TRACE_ID) self.assertEqual(exemplars[0].span_id, self.SPAN_ID) def test_filter_attributes(self): reservoir = SimpleFixedSizeExemplarReservoir(1) span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) reservoir.offer( 1, time_ns(), {"key1": "value1", "key2": "value2"}, ctx ) exemplars = reservoir.collect({"key2": "value2"}) self.assertEqual(len(exemplars), 1) self.assertIn("key1", exemplars[0].filtered_attributes) self.assertNotIn("key2", exemplars[0].filtered_attributes) def test_reset_after_collection(self): reservoir = SimpleFixedSizeExemplarReservoir(4) reservoir.offer(1.0, time_ns(), {"attribute": "value1"}, Context()) reservoir.offer(2.0, time_ns(), {"attribute": "value2"}, Context()) reservoir.offer(3.0, time_ns(), {"attribute": "value3"}, Context()) exemplars = reservoir.collect({}) self.assertEqual(len(exemplars), 3) # Offer new measurements after reset 
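# (collect() empties the reservoir, so only the two offers below become # candidates for the next collection)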
reservoir.offer(4.0, time_ns(), {"attribute": "value4"}, Context()) reservoir.offer(5.0, time_ns(), {"attribute": "value5"}, Context()) # Collect again and check the number of exemplars new_exemplars = reservoir.collect({}) self.assertEqual(len(new_exemplars), 2) self.assertEqual(new_exemplars[0].value, 4.0) self.assertEqual(new_exemplars[1].value, 5.0) class TestAlignedHistogramBucketExemplarReservoir(TestCase): TRACE_ID = int("d4cda95b652f4a1592b449d5929fda1b", 16) SPAN_ID = int("6e0c63257de34c92", 16) def test_measurement_in_buckets(self): reservoir = AlignedHistogramBucketExemplarReservoir( [0, 5, 10, 25, 50, 75] ) span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) reservoir.offer(80, time_ns(), {"bucket": "5"}, ctx) # outlier reservoir.offer(52, time_ns(), {"bucket": "4"}, ctx) reservoir.offer(7, time_ns(), {"bucket": "1"}, ctx) reservoir.offer(6, time_ns(), {"bucket": "1"}, ctx) exemplars = reservoir.collect({"bucket": "1"}) self.assertEqual(len(exemplars), 3) self.assertEqual(exemplars[0].value, 6) self.assertEqual(exemplars[1].value, 52) self.assertEqual(exemplars[2].value, 80) # outlier self.assertEqual(len(exemplars[0].filtered_attributes), 0) def test_last_measurement_in_bucket(self): reservoir = AlignedHistogramBucketExemplarReservoir([0, 5, 10, 25]) span_context = SpanContext( trace_id=self.TRACE_ID, span_id=self.SPAN_ID, is_remote=False, trace_flags=TraceFlags(TraceFlags.SAMPLED), trace_state={}, ) span = trace.NonRecordingSpan(span_context) ctx = trace.set_span_in_context(span) # Offer values to the reservoir reservoir.offer(2, time_ns(), {"bucket": "1"}, ctx) # Bucket 1 reservoir.offer(7, time_ns(), {"bucket": "2"}, ctx) # Bucket 2 reservoir.offer( 8, time_ns(), {"bucket": "2"}, ctx ) # Bucket 2 - should replace the 7 reservoir.offer(15, time_ns(), {"bucket": "3"}, ctx) # Bucket 3 exemplars = reservoir.collect({}) # Check that each bucket has the correct value self.assertEqual(len(exemplars), 3) self.assertEqual(exemplars[0].value, 2) self.assertEqual(exemplars[1].value, 8) self.assertEqual(exemplars[2].value, 15) class TestExemplarReservoirFactory(TestCase): def test_sum_aggregation(self): exemplar_reservoir = _default_reservoir_factory(_SumAggregation) self.assertEqual(exemplar_reservoir, SimpleFixedSizeExemplarReservoir) def test_last_value_aggregation(self): exemplar_reservoir = _default_reservoir_factory(_LastValueAggregation) self.assertEqual(exemplar_reservoir, SimpleFixedSizeExemplarReservoir) def test_explicit_histogram_aggregation(self): exemplar_reservoir = _default_reservoir_factory( _ExplicitBucketHistogramAggregation ) self.assertEqual( exemplar_reservoir, AlignedHistogramBucketExemplarReservoir ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_import.py000066400000000000000000000050431511654350100272310ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=unused-import,import-outside-toplevel,too-many-locals from opentelemetry.test import TestCase class TestImport(TestCase): def test_import_init(self): """ Test that the metrics root module has the right symbols """ with self.assertNotRaises(Exception): from opentelemetry.sdk.metrics import ( # noqa: F401, PLC0415 Counter, Histogram, Meter, MeterProvider, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, _Gauge, ) def test_import_export(self): """ Test that the metrics export module has the right symbols """ with self.assertNotRaises(Exception): from opentelemetry.sdk.metrics.export import ( # noqa: F401, PLC0415 AggregationTemporality, ConsoleMetricExporter, DataPointT, DataT, Gauge, Histogram, HistogramDataPoint, InMemoryMetricReader, Metric, MetricExporter, MetricExportResult, MetricReader, MetricsData, NumberDataPoint, PeriodicExportingMetricReader, ResourceMetrics, ScopeMetrics, Sum, ) def test_import_view(self): """ Test that the metrics view module has the right symbols """ with self.assertNotRaises(Exception): from opentelemetry.sdk.metrics.view import ( # noqa: F401, PLC0415 Aggregation, DefaultAggregation, DropAggregation, ExplicitBucketHistogramAggregation, LastValueAggregation, SumAggregation, View, ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_in_memory_metric_reader.py000066400000000000000000000123541511654350100326050ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
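# A minimal usage sketch of the in-memory reader exercised by the tests in
# this module; it is illustrative only (the meter and counter names below
# are assumptions) and not part of the original test suite.
def _example_in_memory_reader_usage():
    # Local imports keep the sketch self-contained.
    from opentelemetry.sdk.metrics import MeterProvider
    from opentelemetry.sdk.metrics.export import InMemoryMetricReader

    reader = InMemoryMetricReader()
    provider = MeterProvider(metric_readers=[reader])
    counter = provider.get_meter("example_meter").create_counter(
        "example_counter"
    )
    counter.add(1, {"key": "value"})
    # get_metrics_data() performs an on-demand collection and returns the
    # accumulated MetricsData.
    return reader.get_metrics_data()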
# pylint: disable=protected-access from time import sleep from unittest import TestCase from unittest.mock import Mock from opentelemetry.metrics import Observation from opentelemetry.sdk.metrics import Counter, MeterProvider from opentelemetry.sdk.metrics.export import ( AggregationTemporality, InMemoryMetricReader, Metric, NumberDataPoint, Sum, ) class TestInMemoryMetricReader(TestCase): def test_no_metrics(self): mock_collect_callback = Mock(return_value=[]) reader = InMemoryMetricReader() reader._set_collect_callback(mock_collect_callback) self.assertEqual(reader.get_metrics_data(), []) mock_collect_callback.assert_called_once() def test_converts_metrics_to_list(self): metric = Metric( name="foo", description="", unit="", data=Sum( data_points=[ NumberDataPoint( attributes={"myattr": "baz"}, start_time_unix_nano=1647626444152947792, time_unix_nano=1647626444153163239, value=72.3309814450449, ) ], aggregation_temporality=AggregationTemporality.CUMULATIVE, is_monotonic=True, ), ) mock_collect_callback = Mock(return_value=(metric,)) reader = InMemoryMetricReader() reader._set_collect_callback(mock_collect_callback) returned_metrics = reader.get_metrics_data() mock_collect_callback.assert_called_once() self.assertIsInstance(returned_metrics, tuple) self.assertEqual(len(returned_metrics), 1) self.assertIs(returned_metrics[0], metric) def test_shutdown(self): # shutdown should always be successful self.assertIsNone(InMemoryMetricReader().shutdown()) def test_integration(self): reader = InMemoryMetricReader() meter = MeterProvider(metric_readers=[reader]).get_meter("test_meter") counter1 = meter.create_counter("counter1") meter.create_observable_gauge( "observable_gauge1", callbacks=[lambda options: [Observation(value=12)]], ) counter1.add(1, {"foo": "1"}) counter1.add(1, {"foo": "2"}) metrics = reader.get_metrics_data() # should be 3 number data points, one from the observable gauge and one # for each labelset from the counter self.assertEqual(len(metrics.resource_metrics[0].scope_metrics), 1) self.assertEqual( len(metrics.resource_metrics[0].scope_metrics[0].metrics), 2 ) self.assertEqual( len( list( metrics.resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points ) ), 2, ) self.assertEqual( len( list( metrics.resource_metrics[0] .scope_metrics[0] .metrics[1] .data.data_points ) ), 1, ) def test_cumulative_multiple_collect(self): reader = InMemoryMetricReader( preferred_temporality={Counter: AggregationTemporality.CUMULATIVE} ) meter = MeterProvider(metric_readers=[reader]).get_meter("test_meter") counter = meter.create_counter("counter1") counter.add(1, attributes={"key": "value"}) reader.collect() number_data_point_0 = list( reader._metrics_data.resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points )[0] # Windows tests fail without this sleep because both time_unix_nano # values are the same. 
sleep(0.1) reader.collect() number_data_point_1 = list( reader._metrics_data.resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points )[0] self.assertEqual( number_data_point_0.attributes, number_data_point_1.attributes ) self.assertEqual( number_data_point_0.start_time_unix_nano, number_data_point_1.start_time_unix_nano, ) self.assertEqual(number_data_point_0.value, number_data_point_1.value) self.assertGreater( number_data_point_1.time_unix_nano, number_data_point_0.time_unix_nano, ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_instrument.py000066400000000000000000000404731511654350100301330ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=no-self-use from logging import WARNING # from time import time_ns from unittest import TestCase from unittest.mock import Mock, patch from opentelemetry.context import Context from opentelemetry.metrics import Observation from opentelemetry.metrics._internal.instrument import CallbackOptions from opentelemetry.sdk.metrics import ( Counter, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, ) from opentelemetry.sdk.metrics import _Gauge as _SDKGauge from opentelemetry.sdk.metrics._internal.instrument import ( _Counter, _Gauge, _Histogram, _ObservableCounter, _ObservableGauge, _ObservableUpDownCounter, _UpDownCounter, ) from opentelemetry.sdk.metrics._internal.measurement import Measurement class TestCounter(TestCase): def testname(self): self.assertEqual(_Counter("name", Mock(), Mock()).name, "name") self.assertEqual(_Counter("Name", Mock(), Mock()).name, "name") def test_add(self): mc = Mock() counter = _Counter("name", Mock(), mc) counter.add(1.0) mc.consume_measurement.assert_called_once() def test_add_non_monotonic(self): mc = Mock() counter = _Counter("name", Mock(), mc) with self.assertLogs(level=WARNING): counter.add(-1.0) mc.consume_measurement.assert_not_called() def test_disallow_direct_counter_creation(self): with self.assertRaises(TypeError): # pylint: disable=abstract-class-instantiated Counter("name", Mock(), Mock()) class TestUpDownCounter(TestCase): def test_add(self): mc = Mock() counter = _UpDownCounter("name", Mock(), mc) counter.add(1.0) mc.consume_measurement.assert_called_once() def test_add_non_monotonic(self): mc = Mock() counter = _UpDownCounter("name", Mock(), mc) counter.add(-1.0) mc.consume_measurement.assert_called_once() def test_disallow_direct_up_down_counter_creation(self): with self.assertRaises(TypeError): # pylint: disable=abstract-class-instantiated UpDownCounter("name", Mock(), Mock()) TEST_ATTRIBUTES = {"foo": "bar"} TEST_CONTEXT = Context() TEST_TIMESTAMP = 1_000_000_000 def callable_callback_0(options: CallbackOptions): return [ Observation(1, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), Observation(2, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), Observation(3,
attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), ] def callable_callback_1(options: CallbackOptions): return [ Observation(4, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), Observation(5, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), Observation(6, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), ] def generator_callback_0(): options = yield assert isinstance(options, CallbackOptions) options = yield [ Observation(1, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), Observation(2, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), Observation(3, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), ] assert isinstance(options, CallbackOptions) def generator_callback_1(): options = yield assert isinstance(options, CallbackOptions) options = yield [ Observation(4, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), Observation(5, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), Observation(6, attributes=TEST_ATTRIBUTES, context=TEST_CONTEXT), ] assert isinstance(options, CallbackOptions) @patch( "opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP), ) class TestObservableGauge(TestCase): def testname(self): self.assertEqual(_ObservableGauge("name", Mock(), Mock()).name, "name") self.assertEqual(_ObservableGauge("Name", Mock(), Mock()).name, "name") def test_callable_callback_0(self): observable_gauge = _ObservableGauge( "name", Mock(), Mock(), [callable_callback_0] ) assert list(observable_gauge.callback(CallbackOptions())) == ( [ Measurement( 1, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ] ) def test_callable_multiple_callable_callback(self): observable_gauge = _ObservableGauge( "name", Mock(), Mock(), [callable_callback_0, callable_callback_1] ) self.assertEqual( list(observable_gauge.callback(CallbackOptions())), [ Measurement( 1, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 4, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 5, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 6, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ], ) def test_generator_callback_0(self): observable_gauge = _ObservableGauge( "name", Mock(), Mock(), [generator_callback_0()] ) self.assertEqual( list(observable_gauge.callback(CallbackOptions())), [ Measurement( 1, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ], ) def test_generator_multiple_generator_callback(self): observable_gauge = _ObservableGauge( "name", Mock(), Mock(), callbacks=[generator_callback_0(), generator_callback_1()], ) self.assertEqual( 
list(observable_gauge.callback(CallbackOptions())), [ Measurement( 1, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 4, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 5, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 6, TEST_TIMESTAMP, instrument=observable_gauge, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ], ) def test_disallow_direct_observable_gauge_creation(self): with self.assertRaises(TypeError): # pylint: disable=abstract-class-instantiated ObservableGauge("name", Mock(), Mock()) @patch( "opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP), ) class TestObservableCounter(TestCase): def test_callable_callback_0(self): observable_counter = _ObservableCounter( "name", Mock(), Mock(), [callable_callback_0] ) self.assertEqual( list(observable_counter.callback(CallbackOptions())), [ Measurement( 1, TEST_TIMESTAMP, instrument=observable_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, TEST_TIMESTAMP, instrument=observable_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, TEST_TIMESTAMP, instrument=observable_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ], ) def test_generator_callback_0(self): observable_counter = _ObservableCounter( "name", Mock(), Mock(), [generator_callback_0()] ) self.assertEqual( list(observable_counter.callback(CallbackOptions())), [ Measurement( 1, TEST_TIMESTAMP, instrument=observable_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, TEST_TIMESTAMP, instrument=observable_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, TEST_TIMESTAMP, instrument=observable_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ], ) def test_disallow_direct_observable_counter_creation(self): with self.assertRaises(TypeError): # pylint: disable=abstract-class-instantiated ObservableCounter("name", Mock(), Mock()) class TestGauge(TestCase): def testname(self): self.assertEqual(_Gauge("name", Mock(), Mock()).name, "name") self.assertEqual(_Gauge("Name", Mock(), Mock()).name, "name") def test_set(self): mc = Mock() gauge = _Gauge("name", Mock(), mc) gauge.set(1.0) mc.consume_measurement.assert_called_once() def test_disallow_direct_counter_creation(self): with self.assertRaises(TypeError): # pylint: disable=abstract-class-instantiated _SDKGauge("name", Mock(), Mock()) @patch( "opentelemetry.sdk.metrics._internal.instrument.time_ns", Mock(return_value=TEST_TIMESTAMP), ) class TestObservableUpDownCounter(TestCase): def test_callable_callback_0(self): observable_up_down_counter = _ObservableUpDownCounter( "name", Mock(), Mock(), [callable_callback_0] ) self.assertEqual( list(observable_up_down_counter.callback(CallbackOptions())), [ Measurement( 1, TEST_TIMESTAMP, instrument=observable_up_down_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, TEST_TIMESTAMP, instrument=observable_up_down_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, TEST_TIMESTAMP, instrument=observable_up_down_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, 
), ], ) def test_generator_callback_0(self): observable_up_down_counter = _ObservableUpDownCounter( "name", Mock(), Mock(), [generator_callback_0()] ) self.assertEqual( list(observable_up_down_counter.callback(CallbackOptions())), [ Measurement( 1, TEST_TIMESTAMP, instrument=observable_up_down_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 2, TEST_TIMESTAMP, instrument=observable_up_down_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), Measurement( 3, TEST_TIMESTAMP, instrument=observable_up_down_counter, context=TEST_CONTEXT, attributes=TEST_ATTRIBUTES, ), ], ) def test_disallow_direct_observable_up_down_counter_creation(self): with self.assertRaises(TypeError): # pylint: disable=abstract-class-instantiated ObservableUpDownCounter("name", Mock(), Mock()) class TestHistogram(TestCase): def test_record(self): mc = Mock() hist = _Histogram("name", Mock(), mc) hist.record(1.0) mc.consume_measurement.assert_called_once() def test_record_non_monotonic(self): mc = Mock() hist = _Histogram("name", Mock(), mc) with self.assertLogs(level=WARNING): hist.record(-1.0) mc.consume_measurement.assert_not_called() def test_disallow_direct_histogram_creation(self): with self.assertRaises(TypeError): # pylint: disable=abstract-class-instantiated Histogram("name", Mock(), Mock()) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py000066400000000000000000000153201511654350100321560ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=invalid-name,no-self-use from time import sleep from unittest import TestCase from unittest.mock import MagicMock, Mock, patch from opentelemetry.sdk.metrics._internal.measurement_consumer import ( MeasurementConsumer, SynchronousMeasurementConsumer, ) from opentelemetry.sdk.metrics._internal.sdk_configuration import ( SdkConfiguration, ) @patch( "opentelemetry.sdk.metrics._internal." 
"measurement_consumer.MetricReaderStorage" ) class TestSynchronousMeasurementConsumer(TestCase): def test_parent(self, _): self.assertIsInstance( SynchronousMeasurementConsumer(MagicMock()), MeasurementConsumer ) def test_creates_metric_reader_storages(self, MockMetricReaderStorage): """It should create one MetricReaderStorage per metric reader passed in the SdkConfiguration""" reader_mocks = [Mock() for _ in range(5)] SynchronousMeasurementConsumer( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=reader_mocks, views=Mock(), ) ) self.assertEqual(len(MockMetricReaderStorage.mock_calls), 5) def test_measurements_passed_to_each_reader_storage( self, MockMetricReaderStorage ): reader_mocks = [Mock() for _ in range(5)] reader_storage_mocks = [Mock() for _ in range(5)] MockMetricReaderStorage.side_effect = reader_storage_mocks consumer = SynchronousMeasurementConsumer( SdkConfiguration( exemplar_filter=Mock(should_sample=Mock(return_value=False)), resource=Mock(), metric_readers=reader_mocks, views=Mock(), ) ) measurement_mock = Mock() consumer.consume_measurement(measurement_mock) for rs_mock in reader_storage_mocks: rs_mock.consume_measurement.assert_called_once_with( measurement_mock, False ) def test_collect_passed_to_reader_stage(self, MockMetricReaderStorage): """Its collect() method should defer to the underlying MetricReaderStorage""" reader_mocks = [Mock() for _ in range(5)] reader_storage_mocks = [Mock() for _ in range(5)] MockMetricReaderStorage.side_effect = reader_storage_mocks consumer = SynchronousMeasurementConsumer( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=reader_mocks, views=Mock(), ) ) for r_mock, rs_mock in zip(reader_mocks, reader_storage_mocks): rs_mock.collect.assert_not_called() consumer.collect(r_mock) rs_mock.collect.assert_called_once_with() def test_collect_calls_async_instruments(self, MockMetricReaderStorage): """Its collect() method should invoke async instruments and pass measurements to the corresponding metric reader storage""" reader_mock = Mock() reader_storage_mock = Mock() MockMetricReaderStorage.return_value = reader_storage_mock consumer = SynchronousMeasurementConsumer( SdkConfiguration( exemplar_filter=Mock(should_sample=Mock(return_value=False)), resource=Mock(), metric_readers=[reader_mock], views=Mock(), ) ) async_instrument_mocks = [MagicMock() for _ in range(5)] for i_mock in async_instrument_mocks: i_mock.callback.return_value = [Mock()] consumer.register_asynchronous_instrument(i_mock) consumer.collect(reader_mock) # it should call async instruments for i_mock in async_instrument_mocks: i_mock.callback.assert_called_once() # it should pass measurements to reader storage self.assertEqual( len(reader_storage_mock.consume_measurement.mock_calls), 5 ) # assert consume_measurement was called with at least 2 arguments the second # matching the mocked exemplar filter self.assertFalse(reader_storage_mock.consume_measurement.call_args[1]) def test_collect_timeout(self, MockMetricReaderStorage): reader_mock = Mock() reader_storage_mock = Mock() MockMetricReaderStorage.return_value = reader_storage_mock consumer = SynchronousMeasurementConsumer( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=[reader_mock], views=Mock(), ) ) def sleep_1(*args, **kwargs): sleep(1) consumer.register_asynchronous_instrument( Mock(**{"callback.side_effect": sleep_1}) ) with self.assertRaises(Exception) as error: consumer.collect(reader_mock, timeout_millis=10) self.assertIn( "Timed out while 
executing callback", error.exception.args[0] ) @patch( "opentelemetry.sdk.metrics._internal." "measurement_consumer.CallbackOptions" ) def test_collect_deadline( self, mock_callback_options, MockMetricReaderStorage ): reader_mock = Mock() reader_storage_mock = Mock() MockMetricReaderStorage.return_value = reader_storage_mock consumer = SynchronousMeasurementConsumer( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=[reader_mock], views=Mock(), ) ) def sleep_1(*args, **kwargs): sleep(1) return [] consumer.register_asynchronous_instrument( Mock(**{"callback.side_effect": sleep_1}) ) consumer.register_asynchronous_instrument( Mock(**{"callback.side_effect": sleep_1}) ) consumer.collect(reader_mock) callback_options_time_call = mock_callback_options.mock_calls[ -1 ].kwargs["timeout_millis"] self.assertLess( callback_options_time_call, 10000, ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_metric_reader.py000066400000000000000000000113741511654350100305300ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=protected-access from typing import Dict, Iterable from unittest import TestCase from unittest.mock import patch from opentelemetry.sdk.metrics import Counter, Histogram, ObservableGauge from opentelemetry.sdk.metrics import _Gauge as _SDKGauge from opentelemetry.sdk.metrics._internal.instrument import ( _Counter, _Gauge, _Histogram, _ObservableCounter, _ObservableGauge, _ObservableUpDownCounter, _UpDownCounter, ) from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Metric, MetricReader, ) from opentelemetry.sdk.metrics.view import ( Aggregation, DefaultAggregation, LastValueAggregation, ) _expected_keys = [ _Counter, _UpDownCounter, _Gauge, _Histogram, _ObservableCounter, _ObservableUpDownCounter, _ObservableGauge, ] class DummyMetricReader(MetricReader): def __init__( self, preferred_temporality: Dict[type, AggregationTemporality] = None, preferred_aggregation: Dict[type, Aggregation] = None, ) -> None: super().__init__( preferred_temporality=preferred_temporality, preferred_aggregation=preferred_aggregation, ) def _receive_metrics( self, metrics_data: Iterable[Metric], timeout_millis: float = 10_000, **kwargs, ) -> None: pass def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: return True class TestMetricReader(TestCase): def test_configure_temporality(self): dummy_metric_reader = DummyMetricReader( preferred_temporality={ Histogram: AggregationTemporality.DELTA, ObservableGauge: AggregationTemporality.DELTA, _SDKGauge: AggregationTemporality.DELTA, } ) self.assertEqual( dummy_metric_reader._instrument_class_temporality.keys(), set(_expected_keys), ) self.assertEqual( dummy_metric_reader._instrument_class_temporality[_Counter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( dummy_metric_reader._instrument_class_temporality[_UpDownCounter], AggregationTemporality.CUMULATIVE, ) self.assertEqual( 
dummy_metric_reader._instrument_class_temporality[_Histogram], AggregationTemporality.DELTA, ) self.assertEqual( dummy_metric_reader._instrument_class_temporality[ _ObservableCounter ], AggregationTemporality.CUMULATIVE, ) self.assertEqual( dummy_metric_reader._instrument_class_temporality[ _ObservableUpDownCounter ], AggregationTemporality.CUMULATIVE, ) self.assertEqual( dummy_metric_reader._instrument_class_temporality[ _ObservableGauge ], AggregationTemporality.DELTA, ) self.assertEqual( dummy_metric_reader._instrument_class_temporality[_Gauge], AggregationTemporality.DELTA, ) def test_configure_aggregation(self): dummy_metric_reader = DummyMetricReader() self.assertEqual( dummy_metric_reader._instrument_class_aggregation.keys(), set(_expected_keys), ) for ( value ) in dummy_metric_reader._instrument_class_aggregation.values(): self.assertIsInstance(value, DefaultAggregation) dummy_metric_reader = DummyMetricReader( preferred_aggregation={Counter: LastValueAggregation()} ) self.assertEqual( dummy_metric_reader._instrument_class_aggregation.keys(), set(_expected_keys), ) self.assertIsInstance( dummy_metric_reader._instrument_class_aggregation[_Counter], LastValueAggregation, ) # pylint: disable=no-self-use def test_force_flush(self): with patch.object(DummyMetricReader, "collect") as mock_collect: DummyMetricReader().force_flush(timeout_millis=10) mock_collect.assert_called_with(timeout_millis=10) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py000066400000000000000000000755241511654350100322630ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
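# A short illustrative sketch, not part of the original suite, of how the
# preferred_temporality / preferred_aggregation mappings exercised by the
# reader tests above can be supplied by user code. The DELTA and boundary
# choices are arbitrary assumptions, and InMemoryMetricReader is assumed to
# accept both keyword arguments (its preferred_temporality use appears
# elsewhere in these tests).
def _example_reader_preferences():
    from opentelemetry.sdk.metrics import Counter, Histogram
    from opentelemetry.sdk.metrics.export import (
        AggregationTemporality,
        InMemoryMetricReader,
    )
    from opentelemetry.sdk.metrics.view import (
        ExplicitBucketHistogramAggregation,
    )

    # Counters are read as deltas; histograms use custom bucket boundaries.
    return InMemoryMetricReader(
        preferred_temporality={Counter: AggregationTemporality.DELTA},
        preferred_aggregation={
            Histogram: ExplicitBucketHistogramAggregation(
                boundaries=(0.0, 5.0, 10.0)
            )
        },
    )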
# pylint: disable=protected-access,invalid-name from logging import WARNING from time import time_ns from unittest.mock import MagicMock, Mock, patch from opentelemetry.context import Context from opentelemetry.sdk.metrics._internal.aggregation import ( _LastValueAggregation, ) from opentelemetry.sdk.metrics._internal.instrument import ( _Counter, _Gauge, _Histogram, _ObservableCounter, _UpDownCounter, ) from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.metric_reader_storage import ( _DEFAULT_VIEW, MetricReaderStorage, ) from opentelemetry.sdk.metrics._internal.sdk_configuration import ( SdkConfiguration, ) from opentelemetry.sdk.metrics.export import AggregationTemporality from opentelemetry.sdk.metrics.view import ( DefaultAggregation, DropAggregation, ExplicitBucketHistogramAggregation, SumAggregation, View, ) from opentelemetry.test.concurrency_test import ConcurrencyTestBase, MockFunc def mock_view_matching(name, *instruments) -> Mock: mock = Mock(name=name) mock._match.side_effect = lambda instrument: instrument in instruments return mock def mock_instrument() -> Mock: instr = Mock() instr.attributes = {} return instr class TestMetricReaderStorage(ConcurrencyTestBase): @patch( "opentelemetry.sdk.metrics._internal" ".metric_reader_storage._ViewInstrumentMatch" ) def test_creates_view_instrument_matches( self, MockViewInstrumentMatch: Mock ): """It should create a MockViewInstrumentMatch when an instrument matches a view""" instrument1 = Mock(name="instrument1") instrument2 = Mock(name="instrument2") view1 = mock_view_matching("view_1", instrument1) view2 = mock_view_matching("view_2", instrument1, instrument2) storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=(view1, view2), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) # instrument1 matches view1 and view2, so should create two # ViewInstrumentMatch objects storage.consume_measurement( Measurement(1, time_ns(), instrument1, Context()) ) self.assertEqual( len(MockViewInstrumentMatch.call_args_list), 2, MockViewInstrumentMatch.mock_calls, ) # they should only be created the first time the instrument is seen storage.consume_measurement( Measurement(1, time_ns(), instrument1, Context()) ) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 2) # instrument2 matches view2, so should create a single # ViewInstrumentMatch MockViewInstrumentMatch.call_args_list.clear() with self.assertLogs(level=WARNING): storage.consume_measurement( Measurement(1, time_ns(), instrument2, Context()) ) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1) @patch( "opentelemetry.sdk.metrics._internal." 
"metric_reader_storage._ViewInstrumentMatch" ) def test_forwards_calls_to_view_instrument_match( self, MockViewInstrumentMatch: Mock ): view_instrument_match1 = Mock( _aggregation=_LastValueAggregation({}, Mock()) ) view_instrument_match2 = Mock( _aggregation=_LastValueAggregation({}, Mock()) ) view_instrument_match3 = Mock( _aggregation=_LastValueAggregation({}, Mock()) ) MockViewInstrumentMatch.side_effect = [ view_instrument_match1, view_instrument_match2, view_instrument_match3, ] instrument1 = Mock(name="instrument1") instrument2 = Mock(name="instrument2") view1 = mock_view_matching("view1", instrument1) view2 = mock_view_matching("view2", instrument1, instrument2) storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=(view1, view2), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) # Measurements from an instrument should be passed on to each # ViewInstrumentMatch objects created for that instrument measurement = Measurement(1, time_ns(), instrument1, Context()) storage.consume_measurement(measurement) view_instrument_match1.consume_measurement.assert_called_once_with( measurement, True ) view_instrument_match2.consume_measurement.assert_called_once_with( measurement, True ) view_instrument_match3.consume_measurement.assert_not_called() measurement = Measurement(1, time_ns(), instrument2, Context()) with self.assertLogs(level=WARNING): storage.consume_measurement(measurement) view_instrument_match3.consume_measurement.assert_called_once_with( measurement, True ) # collect() should call collect on all of its _ViewInstrumentMatch # objects and combine them together all_metrics = [Mock() for _ in range(6)] view_instrument_match1.collect.return_value = all_metrics[:2] view_instrument_match2.collect.return_value = all_metrics[2:4] view_instrument_match3.collect.return_value = all_metrics[4:] result = storage.collect() view_instrument_match1.collect.assert_called_once() view_instrument_match2.collect.assert_called_once() view_instrument_match3.collect.assert_called_once() self.assertEqual( ( result.resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points[0] ), all_metrics[0], ) self.assertEqual( ( result.resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points[1] ), all_metrics[1], ) self.assertEqual( ( result.resource_metrics[0] .scope_metrics[0] .metrics[1] .data.data_points[0] ), all_metrics[2], ) self.assertEqual( ( result.resource_metrics[0] .scope_metrics[0] .metrics[1] .data.data_points[1] ), all_metrics[3], ) self.assertEqual( ( result.resource_metrics[0] .scope_metrics[1] .metrics[0] .data.data_points[0] ), all_metrics[4], ) self.assertEqual( ( result.resource_metrics[0] .scope_metrics[1] .metrics[0] .data.data_points[1] ), all_metrics[5], ) @patch( "opentelemetry.sdk.metrics._internal." 
"metric_reader_storage._ViewInstrumentMatch" ) def test_race_concurrent_measurements(self, MockViewInstrumentMatch: Mock): mock_view_instrument_match_ctor = MockFunc() MockViewInstrumentMatch.side_effect = mock_view_instrument_match_ctor instrument1 = Mock(name="instrument1") view1 = mock_view_matching(instrument1) storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=(view1,), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) def send_measurement(): storage.consume_measurement( Measurement(1, time_ns(), instrument1, Context()) ) # race sending many measurements concurrently self.run_with_many_threads(send_measurement) # _ViewInstrumentMatch constructor should have only been called once self.assertEqual(mock_view_instrument_match_ctor.call_count, 1) @patch( "opentelemetry.sdk.metrics._internal." "metric_reader_storage._ViewInstrumentMatch" ) def test_default_view_enabled(self, MockViewInstrumentMatch: Mock): """Instruments should be matched with default views when enabled""" instrument1 = Mock(name="instrument1") instrument2 = Mock(name="instrument2") storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=(), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) storage.consume_measurement( Measurement(1, time_ns(), instrument1, Context()) ) self.assertEqual( len(MockViewInstrumentMatch.call_args_list), 1, MockViewInstrumentMatch.mock_calls, ) storage.consume_measurement( Measurement(1, time_ns(), instrument1, Context()) ) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1) MockViewInstrumentMatch.call_args_list.clear() storage.consume_measurement( Measurement(1, time_ns(), instrument2, Context()) ) self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1) def test_drop_aggregation(self): counter = _Counter("name", Mock(), Mock()) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View( instrument_name="name", aggregation=DropAggregation() ), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) metric_reader_storage.consume_measurement( Measurement(1, time_ns(), counter, Context()) ) self.assertIsNone(metric_reader_storage.collect()) def test_same_collection_start(self): counter = _Counter("name", Mock(), Mock()) up_down_counter = _UpDownCounter("name", Mock(), Mock()) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=(View(instrument_name="name"),), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) metric_reader_storage.consume_measurement( Measurement(1, time_ns(), counter, Context()) ) metric_reader_storage.consume_measurement( Measurement(1, time_ns(), up_down_counter, Context()) ) actual = metric_reader_storage.collect() self.assertEqual( list( actual.resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points )[0].time_unix_nano, list( actual.resource_metrics[0] .scope_metrics[1] .metrics[0] .data.data_points )[0].time_unix_nano, ) def test_conflicting_view_configuration(self): 
observable_counter = _ObservableCounter( "observable_counter", Mock(), [Mock()], unit="unit", description="description", ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View( instrument_name="observable_counter", aggregation=ExplicitBucketHistogramAggregation(), ), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), observable_counter, Context()) ) self.assertIs( metric_reader_storage._instrument_view_instrument_matches[ observable_counter ][0]._view, _DEFAULT_VIEW, ) def test_view_instrument_match_conflict_0(self): # There is a conflict between views and instruments. observable_counter_0 = _ObservableCounter( "observable_counter_0", Mock(), [Mock()], unit="unit", description="description", ) observable_counter_1 = _ObservableCounter( "observable_counter_1", Mock(), [Mock()], unit="unit", description="description", ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View(instrument_name="observable_counter_0", name="foo"), View(instrument_name="observable_counter_1", name="foo"), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), observable_counter_0, Context()) ) with self.assertLogs(level=WARNING) as log: metric_reader_storage.consume_measurement( Measurement(1, time_ns(), observable_counter_1, Context()) ) self.assertIn( "will cause conflicting metrics", log.records[0].message, ) def test_view_instrument_match_conflict_1(self): # There is a conflict between views and instruments. 
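# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test suite): the collision these
# conflict tests look for. Renaming two otherwise-identical instruments to
# one stream name keeps both streams, but the SDK logs a warning containing
# "will cause conflicting metrics" when the second stream is created.
# Instrument names are illustrative.
# ---------------------------------------------------------------------------
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.metrics.view import View

reader = InMemoryMetricReader()
provider = MeterProvider(
    metric_readers=[reader],
    views=[
        View(instrument_name="counter_a", name="shared_name"),
        View(instrument_name="counter_b", name="shared_name"),
    ],
)
meter = provider.get_meter("sketch")
meter.create_counter("counter_a").add(1)
meter.create_counter("counter_b").add(1)  # logs the conflict warning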
observable_counter_foo = _ObservableCounter( "foo", Mock(), [Mock()], unit="unit", description="description", ) observable_counter_bar = _ObservableCounter( "bar", Mock(), [Mock()], unit="unit", description="description", ) observable_counter_baz = _ObservableCounter( "baz", Mock(), [Mock()], unit="unit", description="description", ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View(instrument_name="bar", name="foo"), View(instrument_name="baz", name="foo"), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement( 1, time_ns(), observable_counter_foo, Context() ) ) with self.assertLogs(level=WARNING) as log: metric_reader_storage.consume_measurement( Measurement(1, time_ns(), observable_counter_bar, Context()) ) self.assertIn( "will cause conflicting metrics", log.records[0].message, ) with self.assertLogs(level=WARNING) as log: metric_reader_storage.consume_measurement( Measurement(1, time_ns(), observable_counter_baz, Context()) ) self.assertIn( "will cause conflicting metrics", log.records[0].message, ) for view_instrument_matches in ( metric_reader_storage._instrument_view_instrument_matches.values() ): for view_instrument_match in view_instrument_matches: self.assertEqual(view_instrument_match._name, "foo") def test_view_instrument_match_conflict_2(self): # There is no conflict because the metric streams names are different. observable_counter_foo = _ObservableCounter( "foo", Mock(), [Mock()], unit="unit", description="description", ) observable_counter_bar = _ObservableCounter( "bar", Mock(), [Mock()], unit="unit", description="description", ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View(instrument_name="foo"), View(instrument_name="bar"), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement( 1, time_ns(), observable_counter_foo, Context() ) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement( 1, time_ns(), observable_counter_bar, Context() ) ) def test_view_instrument_match_conflict_3(self): # There is no conflict because the aggregation temporality of the # instruments is different. 
counter_bar = _Counter( "bar", Mock(), [Mock()], unit="unit", description="description", ) observable_counter_baz = _ObservableCounter( "baz", Mock(), [Mock()], unit="unit", description="description", ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View(instrument_name="bar", name="foo"), View(instrument_name="baz", name="foo"), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), counter_bar, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement( 1, time_ns(), observable_counter_baz, Context() ) ) def test_view_instrument_match_conflict_4(self): # There is no conflict because the monotonicity of the instruments is # different. counter_bar = _Counter( "bar", Mock(), [Mock()], unit="unit", description="description", ) up_down_counter_baz = _UpDownCounter( "baz", Mock(), [Mock()], unit="unit", description="description", ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View(instrument_name="bar", name="foo"), View(instrument_name="baz", name="foo"), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), counter_bar, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), up_down_counter_baz, Context()) ) def test_view_instrument_match_conflict_5(self): # There is no conflict because the instrument units are different. observable_counter_0 = _ObservableCounter( "observable_counter_0", Mock(), [Mock()], unit="unit_0", description="description", ) observable_counter_1 = _ObservableCounter( "observable_counter_1", Mock(), [Mock()], unit="unit_1", description="description", ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View(instrument_name="observable_counter_0", name="foo"), View(instrument_name="observable_counter_1", name="foo"), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), observable_counter_0, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), observable_counter_1, Context()) ) def test_view_instrument_match_conflict_6(self): # There is no conflict because the instrument data points are # different. 
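# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test suite), condensing the rule
# the conflict_2..conflict_6 cases probe: two streams collide only when name,
# data-point type, aggregation temporality, monotonicity, AND unit all agree.
# Here the shared stream name is harmless because the units differ. Units and
# names are illustrative.
# ---------------------------------------------------------------------------
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.metrics.view import View

reader = InMemoryMetricReader()
provider = MeterProvider(
    metric_readers=[reader],
    views=[
        View(instrument_name="bytes_in", name="io"),
        View(instrument_name="requests_in", name="io"),
    ],
)
meter = provider.get_meter("sketch")
meter.create_counter("bytes_in", unit="By").add(512)
meter.create_counter("requests_in", unit="{request}").add(1)  # no warning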
observable_counter = _ObservableCounter( "observable_counter", Mock(), [Mock()], unit="unit", description="description", ) histogram = _Histogram( "histogram", Mock(), [Mock()], unit="unit", description="description", ) gauge = _Gauge( "gauge", Mock(), [Mock()], unit="unit", description="description", ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View(instrument_name="observable_counter", name="foo"), View(instrument_name="histogram", name="foo"), View(instrument_name="gauge", name="foo"), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), observable_counter, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), histogram, Context()) ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), gauge, Context()) ) def test_view_instrument_match_conflict_7(self): # There is a conflict between views and instruments because the # description being different does not avoid a conflict. observable_counter_0 = _ObservableCounter( "observable_counter_0", Mock(), [Mock()], unit="unit", description="description_0", ) observable_counter_1 = _ObservableCounter( "observable_counter_1", Mock(), [Mock()], unit="unit", description="description_1", ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View(instrument_name="observable_counter_0", name="foo"), View(instrument_name="observable_counter_1", name="foo"), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), observable_counter_0, Context()) ) with self.assertLogs(level=WARNING) as log: metric_reader_storage.consume_measurement( Measurement(1, time_ns(), observable_counter_1, Context()) ) self.assertIn( "will cause conflicting metrics", log.records[0].message, ) def test_view_instrument_match_conflict_8(self): # There is a conflict because the histogram-matching view changes the # default aggregation of the histogram to Sum aggregation which is the # same aggregation as the default aggregation of the up down counter # and also the temporality and monotonicity of the up down counter and # the histogram are the same. 
up_down_counter = _UpDownCounter( "up_down_counter", Mock(), [Mock()], unit="unit", description="description", ) histogram = _Histogram( "histogram", Mock(), [Mock()], unit="unit", description="description", ) metric_reader_storage = MetricReaderStorage( SdkConfiguration( exemplar_filter=Mock(), resource=Mock(), metric_readers=(), views=( View(instrument_name="up_down_counter", name="foo"), View( instrument_name="histogram", name="foo", aggregation=SumAggregation(), ), ), ), MagicMock( **{ "__getitem__.return_value": AggregationTemporality.CUMULATIVE } ), MagicMock(**{"__getitem__.return_value": DefaultAggregation()}), ) with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): metric_reader_storage.consume_measurement( Measurement(1, time_ns(), up_down_counter, Context()) ) with self.assertLogs(level=WARNING) as log: metric_reader_storage.consume_measurement( Measurement(1, time_ns(), histogram, Context()) ) self.assertIn( "will cause conflicting metrics", log.records[0].message, ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_metrics.py000066400000000000000000000541151511654350100273710ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=protected-access,no-self-use import weakref from logging import WARNING from time import sleep from typing import Iterable, Sequence from unittest.mock import MagicMock, Mock, patch from opentelemetry.attributes import BoundedAttributes from opentelemetry.metrics import NoOpMeter from opentelemetry.sdk.environment_variables import OTEL_SDK_DISABLED from opentelemetry.sdk.metrics import ( Counter, Histogram, Meter, MeterProvider, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, _Gauge, ) from opentelemetry.sdk.metrics._internal import SynchronousMeasurementConsumer from opentelemetry.sdk.metrics.export import ( Metric, MetricExporter, MetricExportResult, MetricReader, PeriodicExportingMetricReader, ) from opentelemetry.sdk.metrics.view import SumAggregation, View from opentelemetry.sdk.resources import Resource from opentelemetry.test import TestCase from opentelemetry.test.concurrency_test import ConcurrencyTestBase, MockFunc class DummyMetricReader(MetricReader): def __init__(self): super().__init__() def _receive_metrics( self, metrics_data: Iterable[Metric], timeout_millis: float = 10_000, **kwargs, ) -> None: pass def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: return True class TestMeterProvider(ConcurrencyTestBase, TestCase): def tearDown(self): MeterProvider._all_metric_readers = weakref.WeakSet() @patch.object(Resource, "create") def test_init_default(self, resource_patch): meter_provider = MeterProvider() resource_mock = resource_patch.return_value resource_patch.assert_called_once() self.assertIsNotNone(meter_provider._sdk_config) self.assertEqual(meter_provider._sdk_config.resource, resource_mock) self.assertTrue( isinstance( meter_provider._measurement_consumer, SynchronousMeasurementConsumer, 
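# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test suite): the subtle case from
# test_view_instrument_match_conflict_8 above -- a View can *introduce* a
# collision by changing an instrument's aggregation. Re-aggregating the
# histogram as a Sum gives it the same stream shape (temporality,
# monotonicity, unit) as the up-down counter, so sharing the name "foo" now
# logs a conflict warning. Instrument names are illustrative.
# ---------------------------------------------------------------------------
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.metrics.view import SumAggregation, View

reader = InMemoryMetricReader()
provider = MeterProvider(
    metric_readers=[reader],
    views=[
        View(instrument_name="up_down", name="foo"),
        View(instrument_name="latency", name="foo", aggregation=SumAggregation()),
    ],
)
meter = provider.get_meter("sketch")
meter.create_up_down_counter("up_down").add(1)
meter.create_histogram("latency").record(1)  # logs the conflict warning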
) ) self.assertIsNotNone(meter_provider._atexit_handler) def test_register_metric_readers(self): mock_exporter = Mock() mock_exporter._preferred_temporality = None mock_exporter._preferred_aggregation = None metric_reader_0 = PeriodicExportingMetricReader(mock_exporter) metric_reader_1 = PeriodicExportingMetricReader(mock_exporter) with self.assertNotRaises(Exception): MeterProvider(metric_readers=(metric_reader_0,)) MeterProvider(metric_readers=(metric_reader_1,)) with self.assertRaises(Exception): MeterProvider(metric_readers=(metric_reader_0,)) MeterProvider(metric_readers=(metric_reader_0,)) def test_resource(self): """ `MeterProvider` provides a way to allow a `Resource` to be specified. """ meter_provider_0 = MeterProvider() meter_provider_1 = MeterProvider() self.assertEqual( meter_provider_0._sdk_config.resource, meter_provider_1._sdk_config.resource, ) self.assertIsInstance(meter_provider_0._sdk_config.resource, Resource) self.assertIsInstance(meter_provider_1._sdk_config.resource, Resource) resource = Resource({"key": "value"}) self.assertIs( MeterProvider(resource=resource)._sdk_config.resource, resource ) def test_get_meter(self): """ `MeterProvider.get_meter` arguments are used to create an `InstrumentationScope` object on the created `Meter`. """ meter = MeterProvider().get_meter( "name", version="version", schema_url="schema_url", attributes={"key": "value"}, ) self.assertEqual(meter._instrumentation_scope.name, "name") self.assertEqual(meter._instrumentation_scope.version, "version") self.assertEqual(meter._instrumentation_scope.schema_url, "schema_url") self.assertEqual( meter._instrumentation_scope.attributes, {"key": "value"} ) def test_get_meter_attributes(self): """ `MeterProvider.get_meter` arguments are used to create an `InstrumentationScope` object on the created `Meter`. """ meter = MeterProvider().get_meter( "name", version="version", schema_url="schema_url", attributes={"key": "value", "key2": 5, "key3": "value3"}, ) self.assertEqual(meter._instrumentation_scope.name, "name") self.assertEqual(meter._instrumentation_scope.version, "version") self.assertEqual(meter._instrumentation_scope.schema_url, "schema_url") self.assertEqual( meter._instrumentation_scope.attributes, {"key": "value", "key2": 5, "key3": "value3"}, ) def test_get_meter_empty(self): """ `MeterProvider.get_meter` called with None or empty string as name should return a NoOpMeter. """ with self.assertLogs(level=WARNING): meter = MeterProvider().get_meter( None, version="version", schema_url="schema_url", ) self.assertIsInstance(meter, NoOpMeter) self.assertEqual(meter._name, None) with self.assertLogs(level=WARNING): meter = MeterProvider().get_meter( "", version="version", schema_url="schema_url", ) self.assertIsInstance(meter, NoOpMeter) self.assertEqual(meter._name, "") def test_get_meter_duplicate(self): """ Subsequent calls to `MeterProvider.get_meter` with the same arguments should return the same `Meter` instance. """ mp = MeterProvider() meter1 = mp.get_meter( "name", version="version", schema_url="schema_url", ) meter2 = mp.get_meter( "name", version="version", schema_url="schema_url", ) meter3 = mp.get_meter( "name2", version="version", schema_url="schema_url", ) self.assertIs(meter1, meter2) self.assertIsNot(meter1, meter3) def test_get_meter_comparison_with_attributes(self): """ Subsequent calls to `MeterProvider.get_meter` with the same arguments should return the same `Meter` instance. 
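# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test suite): the caching behavior
# test_get_meter_duplicate verifies. get_meter() keys the returned Meter on
# its arguments (name, version, schema_url, and scope attributes), so equal
# arguments yield the same cached instance. Argument values are illustrative.
# ---------------------------------------------------------------------------
from opentelemetry.sdk.metrics import MeterProvider

provider = MeterProvider()
meter_1 = provider.get_meter("svc", version="1.0", schema_url="https://example.com/schema")
meter_2 = provider.get_meter("svc", version="1.0", schema_url="https://example.com/schema")
meter_3 = provider.get_meter("other")
assert meter_1 is meter_2 and meter_1 is not meter_3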
""" mp = MeterProvider() meter1 = mp.get_meter( "name", version="version", schema_url="schema_url", attributes={"key": "value", "key2": 5, "key3": "value3"}, ) meter2 = mp.get_meter( "name", version="version", schema_url="schema_url", attributes={"key": "value", "key2": 5, "key3": "value3"}, ) meter3 = mp.get_meter( "name2", version="version", schema_url="schema_url", ) meter4 = mp.get_meter( "name", version="version", schema_url="schema_url", attributes={"key": "value", "key2": 5, "key3": "value4"}, ) self.assertIs(meter1, meter2) self.assertIsNot(meter1, meter3) self.assertTrue( meter3._instrumentation_scope > meter4._instrumentation_scope ) self.assertIsInstance( meter4._instrumentation_scope.attributes, BoundedAttributes ) def test_shutdown(self): mock_metric_reader_0 = MagicMock( **{ "shutdown.side_effect": ZeroDivisionError(), } ) mock_metric_reader_1 = MagicMock( **{ "shutdown.side_effect": AssertionError(), } ) meter_provider = MeterProvider( metric_readers=[mock_metric_reader_0, mock_metric_reader_1] ) with self.assertRaises(Exception) as error: meter_provider.shutdown() error = error.exception self.assertEqual( str(error), ( "MeterProvider.shutdown failed because the following " "metric readers failed during shutdown:\n" "MagicMock: ZeroDivisionError()\n" "MagicMock: AssertionError()" ), ) mock_metric_reader_0.shutdown.assert_called_once() mock_metric_reader_1.shutdown.assert_called_once() mock_metric_reader_0 = Mock() mock_metric_reader_1 = Mock() meter_provider = MeterProvider( metric_readers=[mock_metric_reader_0, mock_metric_reader_1] ) self.assertIsNone(meter_provider.shutdown()) mock_metric_reader_0.shutdown.assert_called_once() mock_metric_reader_1.shutdown.assert_called_once() def test_shutdown_subsequent_calls(self): """ No subsequent attempts to get a `Meter` are allowed after calling `MeterProvider.shutdown` """ meter_provider = MeterProvider() with self.assertRaises(AssertionError): with self.assertLogs(level=WARNING): meter_provider.shutdown() with self.assertLogs(level=WARNING): meter_provider.shutdown() @patch("opentelemetry.sdk.metrics._internal._logger") def test_shutdown_race(self, mock_logger): mock_logger.warning = MockFunc() meter_provider = MeterProvider() num_threads = 70 self.run_with_many_threads( meter_provider.shutdown, num_threads=num_threads ) self.assertEqual(mock_logger.warning.call_count, num_threads - 1) @patch( "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_measurement_collect_callback( self, mock_sync_measurement_consumer ): metric_readers = [ DummyMetricReader(), DummyMetricReader(), DummyMetricReader(), DummyMetricReader(), DummyMetricReader(), ] sync_consumer_instance = mock_sync_measurement_consumer() sync_consumer_instance.collect = MockFunc() MeterProvider(metric_readers=metric_readers) for reader in metric_readers: reader.collect() self.assertEqual( sync_consumer_instance.collect.call_count, len(metric_readers) ) @patch( "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_creates_sync_measurement_consumer( self, mock_sync_measurement_consumer ): MeterProvider() mock_sync_measurement_consumer.assert_called() @patch( "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_register_asynchronous_instrument( self, mock_sync_measurement_consumer ): meter_provider = MeterProvider() # pylint: disable=no-member meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with( meter_provider.get_meter("name").create_observable_counter( 
"name0", callbacks=[Mock()] ) ) meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with( meter_provider.get_meter("name").create_observable_up_down_counter( "name1", callbacks=[Mock()] ) ) meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with( meter_provider.get_meter("name").create_observable_gauge( "name2", callbacks=[Mock()] ) ) @patch( "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_consume_measurement_counter(self, mock_sync_measurement_consumer): sync_consumer_instance = mock_sync_measurement_consumer() meter_provider = MeterProvider() counter = meter_provider.get_meter("name").create_counter("name") counter.add(1) sync_consumer_instance.consume_measurement.assert_called() @patch( "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_consume_measurement_up_down_counter( self, mock_sync_measurement_consumer ): sync_consumer_instance = mock_sync_measurement_consumer() meter_provider = MeterProvider() counter = meter_provider.get_meter("name").create_up_down_counter( "name" ) counter.add(1) sync_consumer_instance.consume_measurement.assert_called() @patch( "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_consume_measurement_histogram( self, mock_sync_measurement_consumer ): sync_consumer_instance = mock_sync_measurement_consumer() meter_provider = MeterProvider() counter = meter_provider.get_meter("name").create_histogram("name") counter.record(1) sync_consumer_instance.consume_measurement.assert_called() @patch( "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_consume_measurement_gauge(self, mock_sync_measurement_consumer): sync_consumer_instance = mock_sync_measurement_consumer() meter_provider = MeterProvider() gauge = meter_provider.get_meter("name").create_gauge("name") gauge.set(1) sync_consumer_instance.consume_measurement.assert_called() class TestMeter(TestCase): def setUp(self): self.meter = Meter(Mock(), Mock()) # TODO: convert to assertNoLogs instead of mocking logger when 3.10 is baseline @patch("opentelemetry.sdk.metrics._internal._logger") def test_repeated_instrument_names(self, logger_mock): with self.assertNotRaises(Exception): self.meter.create_counter("counter") self.meter.create_up_down_counter("up_down_counter") self.meter.create_observable_counter( "observable_counter", callbacks=[Mock()] ) self.meter.create_histogram("histogram") self.meter.create_gauge("gauge") self.meter.create_observable_gauge( "observable_gauge", callbacks=[Mock()] ) self.meter.create_observable_up_down_counter( "observable_up_down_counter", callbacks=[Mock()] ) for instrument_name in [ "counter", "up_down_counter", "histogram", "gauge", ]: getattr(self.meter, f"create_{instrument_name}")(instrument_name) logger_mock.warning.assert_not_called() for instrument_name in [ "observable_counter", "observable_gauge", "observable_up_down_counter", ]: getattr(self.meter, f"create_{instrument_name}")( instrument_name, callbacks=[Mock()] ) logger_mock.warning.assert_not_called() def test_repeated_instrument_names_with_different_advisory(self): with self.assertNotRaises(Exception): self.meter.create_histogram( "histogram", explicit_bucket_boundaries_advisory=[1.0] ) for instrument_name in [ "histogram", ]: with self.assertLogs(level=WARNING): getattr(self.meter, f"create_{instrument_name}")( instrument_name ) def test_create_counter(self): counter = self.meter.create_counter( "name", unit="unit", 
description="description" ) self.assertIsInstance(counter, Counter) self.assertEqual(counter.name, "name") def test_create_up_down_counter(self): up_down_counter = self.meter.create_up_down_counter( "name", unit="unit", description="description" ) self.assertIsInstance(up_down_counter, UpDownCounter) self.assertEqual(up_down_counter.name, "name") def test_create_observable_counter(self): observable_counter = self.meter.create_observable_counter( "name", callbacks=[Mock()], unit="unit", description="description" ) self.assertIsInstance(observable_counter, ObservableCounter) self.assertEqual(observable_counter.name, "name") def test_create_histogram(self): histogram = self.meter.create_histogram( "name", unit="unit", description="description" ) self.assertIsInstance(histogram, Histogram) self.assertEqual(histogram.name, "name") def test_create_histogram_with_advisory(self): histogram = self.meter.create_histogram( "name", unit="unit", description="description", explicit_bucket_boundaries_advisory=[0.0, 1.0, 2], ) self.assertIsInstance(histogram, Histogram) self.assertEqual(histogram.name, "name") self.assertEqual( histogram._advisory.explicit_bucket_boundaries, [0.0, 1.0, 2], ) def test_create_histogram_advisory_validation(self): advisories = [ {"explicit_bucket_boundaries_advisory": "hello"}, {"explicit_bucket_boundaries_advisory": ["1"]}, ] for advisory in advisories: with self.subTest(advisory=advisory): with self.assertLogs(level=WARNING): self.meter.create_histogram( "name", unit="unit", description="description", **advisory, ) def test_create_observable_gauge(self): observable_gauge = self.meter.create_observable_gauge( "name", callbacks=[Mock()], unit="unit", description="description" ) self.assertIsInstance(observable_gauge, ObservableGauge) self.assertEqual(observable_gauge.name, "name") def test_create_gauge(self): gauge = self.meter.create_gauge( "name", unit="unit", description="description" ) self.assertIsInstance(gauge, _Gauge) self.assertEqual(gauge.name, "name") def test_create_observable_up_down_counter(self): observable_up_down_counter = ( self.meter.create_observable_up_down_counter( "name", callbacks=[Mock()], unit="unit", description="description", ) ) self.assertIsInstance( observable_up_down_counter, ObservableUpDownCounter ) self.assertEqual(observable_up_down_counter.name, "name") @patch.dict("os.environ", {OTEL_SDK_DISABLED: "true"}) def test_get_meter_with_sdk_disabled(self): meter_provider = MeterProvider() self.assertIsInstance(meter_provider.get_meter(Mock()), NoOpMeter) class InMemoryMetricExporter(MetricExporter): def __init__(self): super().__init__() self.metrics = {} self._counter = 0 def export( self, metrics_data: Sequence[Metric], timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: self.metrics[self._counter] = metrics_data self._counter += 1 return MetricExportResult.SUCCESS def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: pass def force_flush(self, timeout_millis: float = 10_000) -> bool: return True class TestDuplicateInstrumentAggregateData(TestCase): def test_duplicate_instrument_aggregate_data(self): exporter = InMemoryMetricExporter() reader = PeriodicExportingMetricReader( exporter, export_interval_millis=500 ) view = View( instrument_type=Counter, attribute_keys=[], aggregation=SumAggregation(), ) provider = MeterProvider( metric_readers=[reader], resource=Resource.create(), views=[view], ) meter_0 = provider.get_meter( name="meter_0", version="version", schema_url="schema_url", ) meter_1 = provider.get_meter( 
name="meter_1", version="version", schema_url="schema_url", ) counter_0_0 = meter_0.create_counter( "counter", unit="unit", description="description" ) counter_0_1 = meter_0.create_counter( "counter", unit="unit", description="description" ) counter_1_0 = meter_1.create_counter( "counter", unit="unit", description="description" ) self.assertIs(counter_0_0, counter_0_1) self.assertIsNot(counter_0_0, counter_1_0) counter_0_0.add(1, {}) counter_0_1.add(2, {}) with self.assertLogs(level=WARNING): counter_1_0.add(7, {}) sleep(1) reader.shutdown() sleep(1) metrics = exporter.metrics[0] scope_metrics = metrics.resource_metrics[0].scope_metrics self.assertEqual(len(scope_metrics), 2) metric_0 = scope_metrics[0].metrics[0] self.assertEqual(metric_0.name, "counter") self.assertEqual(metric_0.unit, "unit") self.assertEqual(metric_0.description, "description") self.assertEqual(next(iter(metric_0.data.data_points)).value, 3) metric_1 = scope_metrics[1].metrics[0] self.assertEqual(metric_1.name, "counter") self.assertEqual(metric_1.unit, "unit") self.assertEqual(metric_1.description, "description") self.assertEqual(next(iter(metric_1.data.data_points)).value, 7) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_periodic_exporting_metric_reader.py000066400000000000000000000213171511654350100345030ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# pylint: disable=protected-access,invalid-name,no-self-use import gc import math import weakref from logging import WARNING from time import sleep, time_ns from typing import Optional, Sequence from unittest.mock import Mock import pytest from opentelemetry.sdk.metrics import Counter, MetricsTimeoutError from opentelemetry.sdk.metrics._internal import _Counter from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Gauge, Metric, MetricExporter, MetricExportResult, NumberDataPoint, PeriodicExportingMetricReader, Sum, ) from opentelemetry.sdk.metrics.view import ( DefaultAggregation, LastValueAggregation, ) from opentelemetry.test.concurrency_test import ConcurrencyTestBase class FakeMetricsExporter(MetricExporter): def __init__( self, wait=0, preferred_temporality=None, preferred_aggregation=None ): self.wait = wait self.metrics = [] self._shutdown = False super().__init__( preferred_temporality=preferred_temporality, preferred_aggregation=preferred_aggregation, ) def export( self, metrics_data: Sequence[Metric], timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: sleep(self.wait) self.metrics.extend(metrics_data) return True def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: self._shutdown = True def force_flush(self, timeout_millis: float = 10_000) -> bool: return True class ExceptionAtCollectionPeriodicExportingMetricReader( PeriodicExportingMetricReader ): def __init__( self, exporter: MetricExporter, exception: Exception, export_interval_millis: Optional[float] = None, export_timeout_millis: Optional[float] = None, ) -> None: super().__init__( exporter, export_interval_millis, export_timeout_millis ) self._collect_exception = exception # pylint: disable=overridden-final-method def collect(self, timeout_millis: float = 10_000) -> None: raise self._collect_exception metrics_list = [ Metric( name="sum_name", description="", unit="", data=Sum( data_points=[ NumberDataPoint( attributes={}, start_time_unix_nano=time_ns(), time_unix_nano=time_ns(), value=2, ) ], aggregation_temporality=1, is_monotonic=True, ), ), Metric( name="gauge_name", description="", unit="", data=Gauge( data_points=[ NumberDataPoint( attributes={}, start_time_unix_nano=time_ns(), time_unix_nano=time_ns(), value=2, ) ] ), ), ] class TestPeriodicExportingMetricReader(ConcurrencyTestBase): def test_defaults(self): pmr = PeriodicExportingMetricReader(FakeMetricsExporter()) self.assertEqual(pmr._export_interval_millis, 60000) self.assertEqual(pmr._export_timeout_millis, 30000) with self.assertLogs(level=WARNING): pmr.shutdown() def _create_periodic_reader( self, metrics, exporter, collect_wait=0, interval=60000, timeout=30000 ): pmr = PeriodicExportingMetricReader( exporter, export_interval_millis=interval, export_timeout_millis=timeout, ) def _collect(reader, timeout_millis): sleep(collect_wait) pmr._receive_metrics(metrics, timeout_millis) pmr._set_collect_callback(_collect) return pmr def test_ticker_called(self): collect_mock = Mock() exporter = FakeMetricsExporter() exporter.export = Mock() pmr = PeriodicExportingMetricReader(exporter, export_interval_millis=1) pmr._set_collect_callback(collect_mock) sleep(0.1) self.assertTrue(collect_mock.assert_called_once) pmr.shutdown() def test_ticker_not_called_on_infinity(self): collect_mock = Mock() exporter = FakeMetricsExporter() exporter.export = Mock() pmr = PeriodicExportingMetricReader( exporter, export_interval_millis=math.inf ) pmr._set_collect_callback(collect_mock) sleep(0.1) 
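# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test suite): the real wiring that
# FakeMetricsExporter stands in for below. PeriodicExportingMetricReader runs
# a daemon thread that collects and exports every export_interval_millis;
# zero or negative intervals raise ValueError. Interval values here are
# illustrative.
# ---------------------------------------------------------------------------
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import (
    ConsoleMetricExporter,
    PeriodicExportingMetricReader,
)

reader = PeriodicExportingMetricReader(
    ConsoleMetricExporter(),
    export_interval_millis=5_000,  # collect + export every 5 s
    export_timeout_millis=2_000,   # per-export deadline
)
provider = MeterProvider(metric_readers=[reader])
provider.get_meter("sketch").create_counter("ticks").add(1)
provider.shutdown()  # stops the ticker thread and shuts the exporter down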
self.assertTrue(collect_mock.assert_not_called) pmr.shutdown() def test_ticker_value_exception_on_zero(self): exporter = FakeMetricsExporter() exporter.export = Mock() self.assertRaises( ValueError, PeriodicExportingMetricReader, exporter, export_interval_millis=0, ) def test_ticker_value_exception_on_negative(self): exporter = FakeMetricsExporter() exporter.export = Mock() self.assertRaises( ValueError, PeriodicExportingMetricReader, exporter, export_interval_millis=-100, ) @pytest.mark.flaky(max_runs=3, min_passes=1) def test_ticker_collects_metrics(self): exporter = FakeMetricsExporter() pmr = self._create_periodic_reader( metrics_list, exporter, interval=100 ) sleep(0.15) self.assertEqual(exporter.metrics, metrics_list) pmr.shutdown() def test_shutdown(self): exporter = FakeMetricsExporter() pmr = self._create_periodic_reader([], exporter) pmr.shutdown() self.assertEqual(exporter.metrics, []) self.assertTrue(pmr._shutdown) self.assertTrue(exporter._shutdown) def test_shutdown_multiple_times(self): pmr = self._create_periodic_reader([], FakeMetricsExporter()) with self.assertLogs(level="WARNING") as w: self.run_with_many_threads(pmr.shutdown) self.assertTrue("Can't shutdown multiple times" in w.output[0]) with self.assertLogs(level="WARNING") as w: pmr.shutdown() def test_exporter_temporality_preference(self): exporter = FakeMetricsExporter( preferred_temporality={ Counter: AggregationTemporality.DELTA, }, ) pmr = PeriodicExportingMetricReader(exporter) for key, value in pmr._instrument_class_temporality.items(): if key is not _Counter: self.assertEqual(value, AggregationTemporality.CUMULATIVE) else: self.assertEqual(value, AggregationTemporality.DELTA) def test_exporter_aggregation_preference(self): exporter = FakeMetricsExporter( preferred_aggregation={ Counter: LastValueAggregation(), }, ) pmr = PeriodicExportingMetricReader(exporter) for key, value in pmr._instrument_class_aggregation.items(): if key is not _Counter: self.assertTrue(isinstance(value, DefaultAggregation)) else: self.assertTrue(isinstance(value, LastValueAggregation)) def test_metric_timeout_does_not_kill_worker_thread(self): exporter = FakeMetricsExporter() pmr = ExceptionAtCollectionPeriodicExportingMetricReader( exporter, MetricsTimeoutError("test timeout"), export_timeout_millis=1, ) sleep(0.1) self.assertTrue(pmr._daemon_thread.is_alive()) pmr.shutdown() def test_metric_exporer_gc(self): # Given a PeriodicExportingMetricReader exporter = FakeMetricsExporter( preferred_aggregation={ Counter: LastValueAggregation(), }, ) processor = PeriodicExportingMetricReader(exporter) weak_ref = weakref.ref(processor) processor.shutdown() # When we garbage collect the reader del processor gc.collect() # Then the reference to the reader should no longer exist self.assertIsNone( weak_ref(), "The PeriodicExportingMetricReader object created by this test wasn't garbage collected", ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_point.py000066400000000000000000000264121511654350100270530ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
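# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test suite): the temporality
# preference mapping checked above, end to end. Preferences are keyed by API
# instrument classes (e.g. Counter) and resolved by the reader for the SDK's
# internal classes. It assumes InMemoryMetricReader accepts the same
# preference maps an exporter does.
# ---------------------------------------------------------------------------
from opentelemetry.sdk.metrics import Counter, MeterProvider
from opentelemetry.sdk.metrics.export import (
    AggregationTemporality,
    InMemoryMetricReader,
)

reader = InMemoryMetricReader(
    preferred_temporality={Counter: AggregationTemporality.DELTA}
)
meter = MeterProvider(metric_readers=[reader]).get_meter("sketch")
meter.create_counter("events").add(5)

sum_data = (
    reader.get_metrics_data()
    .resource_metrics[0]
    .scope_metrics[0]
    .metrics[0]
    .data
)
assert sum_data.aggregation_temporality == AggregationTemporality.DELTA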
# See the License for the specific language governing permissions and # limitations under the License. from unittest import TestCase from opentelemetry.sdk.metrics.export import ( AggregationTemporality, Buckets, ExponentialHistogram, ExponentialHistogramDataPoint, Gauge, Histogram, HistogramDataPoint, Metric, MetricsData, NumberDataPoint, ResourceMetrics, ScopeMetrics, Sum, ) from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.util.instrumentation import InstrumentationScope class TestToJson(TestCase): @classmethod def setUpClass(cls): cls.attributes_0 = { "a": "b", "b": True, "c": 1, "d": 1.1, "e": ["a", "b"], "f": [True, False], "g": [1, 2], "h": [1.1, 2.2], } cls.attributes_0_str = '{"a": "b", "b": true, "c": 1, "d": 1.1, "e": ["a", "b"], "f": [true, false], "g": [1, 2], "h": [1.1, 2.2]}' cls.attributes_1 = { "i": "a", "j": False, "k": 2, "l": 2.2, "m": ["b", "a"], "n": [False, True], "o": [2, 1], "p": [2.2, 1.1], } cls.attributes_1_str = '{"i": "a", "j": false, "k": 2, "l": 2.2, "m": ["b", "a"], "n": [false, true], "o": [2, 1], "p": [2.2, 1.1]}' cls.number_data_point_0 = NumberDataPoint( attributes=cls.attributes_0, start_time_unix_nano=1, time_unix_nano=2, value=3.3, ) cls.number_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "value": 3.3, "exemplars": []}}' cls.number_data_point_1 = NumberDataPoint( attributes=cls.attributes_1, start_time_unix_nano=2, time_unix_nano=3, value=4.4, ) cls.number_data_point_1_str = f'{{"attributes": {cls.attributes_1_str}, "start_time_unix_nano": 2, "time_unix_nano": 3, "value": 4.4, "exemplars": []}}' cls.histogram_data_point_0 = HistogramDataPoint( attributes=cls.attributes_0, start_time_unix_nano=1, time_unix_nano=2, count=3, sum=3.3, bucket_counts=[1, 1, 1], explicit_bounds=[0.1, 1.2, 2.3, 3.4], min=0.2, max=3.3, ) cls.histogram_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "count": 3, "sum": 3.3, "bucket_counts": [1, 1, 1], "explicit_bounds": [0.1, 1.2, 2.3, 3.4], "min": 0.2, "max": 3.3, "exemplars": []}}' cls.histogram_data_point_1 = HistogramDataPoint( attributes=cls.attributes_1, start_time_unix_nano=2, time_unix_nano=3, count=4, sum=4.4, bucket_counts=[2, 1, 1], explicit_bounds=[1.2, 2.3, 3.4, 4.5], min=0.3, max=4.4, ) cls.histogram_data_point_1_str = f'{{"attributes": {cls.attributes_1_str}, "start_time_unix_nano": 2, "time_unix_nano": 3, "count": 4, "sum": 4.4, "bucket_counts": [2, 1, 1], "explicit_bounds": [1.2, 2.3, 3.4, 4.5], "min": 0.3, "max": 4.4, "exemplars": []}}' cls.exp_histogram_data_point_0 = ExponentialHistogramDataPoint( attributes=cls.attributes_0, start_time_unix_nano=1, time_unix_nano=2, count=1, sum=10, scale=1, zero_count=0, positive=Buckets(offset=0, bucket_counts=[1]), negative=Buckets(offset=0, bucket_counts=[0]), flags=0, min=10, max=10, ) cls.exp_histogram_data_point_0_str = f'{{"attributes": {cls.attributes_0_str}, "start_time_unix_nano": 1, "time_unix_nano": 2, "count": 1, "sum": 10, "scale": 1, "zero_count": 0, "positive": {{"offset": 0, "bucket_counts": [1]}}, "negative": {{"offset": 0, "bucket_counts": [0]}}, "flags": 0, "min": 10, "max": 10, "exemplars": []}}' cls.sum_0 = Sum( data_points=[cls.number_data_point_0, cls.number_data_point_1], aggregation_temporality=AggregationTemporality.DELTA, is_monotonic=False, ) cls.sum_0_str = f'{{"data_points": [{cls.number_data_point_0_str}, {cls.number_data_point_1_str}], "aggregation_temporality": 1, "is_monotonic": false}}' 
cls.gauge_0 = Gauge( data_points=[cls.number_data_point_0, cls.number_data_point_1], ) cls.gauge_0_str = f'{{"data_points": [{cls.number_data_point_0_str}, {cls.number_data_point_1_str}]}}' cls.histogram_0 = Histogram( data_points=[ cls.histogram_data_point_0, cls.histogram_data_point_1, ], aggregation_temporality=AggregationTemporality.DELTA, ) cls.histogram_0_str = f'{{"data_points": [{cls.histogram_data_point_0_str}, {cls.histogram_data_point_1_str}], "aggregation_temporality": 1}}' cls.exp_histogram_0 = ExponentialHistogram( data_points=[ cls.exp_histogram_data_point_0, ], aggregation_temporality=AggregationTemporality.CUMULATIVE, ) cls.exp_histogram_0_str = f'{{"data_points": [{cls.exp_histogram_data_point_0_str}], "aggregation_temporality": 2}}' cls.metric_0 = Metric( name="metric_0", description="description_0", unit="unit_0", data=cls.sum_0, ) cls.metric_0_str = f'{{"name": "metric_0", "description": "description_0", "unit": "unit_0", "data": {cls.sum_0_str}}}' cls.metric_1 = Metric( name="metric_1", description=None, unit="unit_1", data=cls.gauge_0 ) cls.metric_1_str = f'{{"name": "metric_1", "description": "", "unit": "unit_1", "data": {cls.gauge_0_str}}}' cls.metric_2 = Metric( name="metric_2", description="description_2", unit=None, data=cls.histogram_0, ) cls.metric_2_str = f'{{"name": "metric_2", "description": "description_2", "unit": "", "data": {cls.histogram_0_str}}}' cls.scope_metrics_0 = ScopeMetrics( scope=InstrumentationScope( name="name_0", version="version_0", schema_url="schema_url_0", ), metrics=[cls.metric_0, cls.metric_1, cls.metric_2], schema_url="schema_url_0", ) cls.scope_metrics_0_str = f'{{"scope": {{"name": "name_0", "version": "version_0", "schema_url": "schema_url_0", "attributes": null}}, "metrics": [{cls.metric_0_str}, {cls.metric_1_str}, {cls.metric_2_str}], "schema_url": "schema_url_0"}}' cls.scope_metrics_1 = ScopeMetrics( scope=InstrumentationScope( name="name_1", version="version_1", schema_url="schema_url_1", ), metrics=[cls.metric_0, cls.metric_1, cls.metric_2], schema_url="schema_url_1", ) cls.scope_metrics_1_str = f'{{"scope": {{"name": "name_1", "version": "version_1", "schema_url": "schema_url_1", "attributes": null}}, "metrics": [{cls.metric_0_str}, {cls.metric_1_str}, {cls.metric_2_str}], "schema_url": "schema_url_1"}}' cls.resource_metrics_0 = ResourceMetrics( resource=Resource( attributes=cls.attributes_0, schema_url="schema_url_0" ), scope_metrics=[cls.scope_metrics_0, cls.scope_metrics_1], schema_url="schema_url_0", ) cls.resource_metrics_0_str = f'{{"resource": {{"attributes": {cls.attributes_0_str}, "schema_url": "schema_url_0"}}, "scope_metrics": [{cls.scope_metrics_0_str}, {cls.scope_metrics_1_str}], "schema_url": "schema_url_0"}}' cls.resource_metrics_1 = ResourceMetrics( resource=Resource( attributes=cls.attributes_1, schema_url="schema_url_1" ), scope_metrics=[cls.scope_metrics_0, cls.scope_metrics_1], schema_url="schema_url_1", ) cls.resource_metrics_1_str = f'{{"resource": {{"attributes": {cls.attributes_1_str}, "schema_url": "schema_url_1"}}, "scope_metrics": [{cls.scope_metrics_0_str}, {cls.scope_metrics_1_str}], "schema_url": "schema_url_1"}}' cls.metrics_data_0 = MetricsData( resource_metrics=[cls.resource_metrics_0, cls.resource_metrics_1] ) cls.metrics_data_0_str = f'{{"resource_metrics": [{cls.resource_metrics_0_str}, {cls.resource_metrics_1_str}]}}' def test_number_data_point(self): self.assertEqual( self.number_data_point_0.to_json(indent=None), self.number_data_point_0_str, ) self.assertEqual( 
self.number_data_point_1.to_json(indent=None), self.number_data_point_1_str, ) def test_histogram_data_point(self): self.assertEqual( self.histogram_data_point_0.to_json(indent=None), self.histogram_data_point_0_str, ) self.assertEqual( self.histogram_data_point_1.to_json(indent=None), self.histogram_data_point_1_str, ) def test_exp_histogram_data_point(self): self.assertEqual( self.exp_histogram_data_point_0.to_json(indent=None), self.exp_histogram_data_point_0_str, ) def test_sum(self): self.assertEqual(self.sum_0.to_json(indent=None), self.sum_0_str) def test_gauge(self): self.assertEqual(self.gauge_0.to_json(indent=None), self.gauge_0_str) def test_histogram(self): self.assertEqual( self.histogram_0.to_json(indent=None), self.histogram_0_str ) def test_exp_histogram(self): self.assertEqual( self.exp_histogram_0.to_json(indent=None), self.exp_histogram_0_str ) def test_metric(self): self.assertEqual(self.metric_0.to_json(indent=None), self.metric_0_str) self.assertEqual(self.metric_1.to_json(indent=None), self.metric_1_str) self.assertEqual(self.metric_2.to_json(indent=None), self.metric_2_str) def test_scope_metrics(self): self.assertEqual( self.scope_metrics_0.to_json(indent=None), self.scope_metrics_0_str ) self.assertEqual( self.scope_metrics_1.to_json(indent=None), self.scope_metrics_1_str ) def test_resource_metrics(self): self.assertEqual( self.resource_metrics_0.to_json(indent=None), self.resource_metrics_0_str, ) self.assertEqual( self.resource_metrics_1.to_json(indent=None), self.resource_metrics_1_str, ) def test_metrics_data(self): self.assertEqual( self.metrics_data_0.to_json(indent=None), self.metrics_data_0_str ) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_view.py000066400000000000000000000073001511654350100266670ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
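# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test suite): what the *_str
# fixtures above encode. Every point and container type exposes to_json(),
# and containers nest the JSON of their children. Attribute values are
# illustrative.
# ---------------------------------------------------------------------------
from time import time_ns

from opentelemetry.sdk.metrics.export import NumberDataPoint

point = NumberDataPoint(
    attributes={"path": "/"},
    start_time_unix_nano=time_ns(),
    time_unix_nano=time_ns(),
    value=1.5,
)
print(point.to_json(indent=None))
# -> {"attributes": {"path": "/"}, "start_time_unix_nano": ...,
#    "time_unix_nano": ..., "value": 1.5, "exemplars": []}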
# pylint: disable=protected-access

from unittest import TestCase
from unittest.mock import Mock

from opentelemetry.sdk.metrics.view import View


class TestView(TestCase):
    def test_required_instrument_criteria(self):
        with self.assertRaises(Exception):
            View()

    def test_instrument_type(self):
        self.assertTrue(View(instrument_type=Mock)._match(Mock()))

    def test_instrument_name(self):
        mock_instrument = Mock()
        mock_instrument.configure_mock(**{"name": "instrument_name"})

        self.assertTrue(
            View(instrument_name="instrument_name")._match(mock_instrument)
        )

    def test_instrument_unit(self):
        mock_instrument = Mock()
        mock_instrument.configure_mock(**{"unit": "instrument_unit"})

        self.assertTrue(
            View(instrument_unit="instrument_unit")._match(mock_instrument)
        )

    def test_meter_name(self):
        self.assertTrue(
            View(meter_name="meter_name")._match(
                Mock(**{"instrumentation_scope.name": "meter_name"})
            )
        )

    def test_meter_version(self):
        self.assertTrue(
            View(meter_version="meter_version")._match(
                Mock(**{"instrumentation_scope.version": "meter_version"})
            )
        )

    def test_meter_schema_url(self):
        self.assertTrue(
            View(meter_schema_url="meter_schema_url")._match(
                Mock(
                    **{"instrumentation_scope.schema_url": "meter_schema_url"}
                )
            )
        )
        self.assertFalse(
            View(meter_schema_url="meter_schema_url")._match(
                Mock(
                    **{
                        "instrumentation_scope.schema_url": "meter_schema_urlabc"
                    }
                )
            )
        )
        self.assertTrue(
            View(meter_schema_url="meter_schema_url")._match(
                Mock(
                    **{"instrumentation_scope.schema_url": "meter_schema_url"}
                )
            )
        )

    def test_additive_criteria(self):
        view = View(
            meter_name="meter_name",
            meter_version="meter_version",
            meter_schema_url="meter_schema_url",
        )

        self.assertTrue(
            view._match(
                Mock(
                    **{
                        "instrumentation_scope.name": "meter_name",
                        "instrumentation_scope.version": "meter_version",
                        "instrumentation_scope.schema_url": "meter_schema_url",
                    }
                )
            )
        )
        self.assertFalse(
            view._match(
                Mock(
                    **{
                        "instrumentation_scope.name": "meter_name",
                        "instrumentation_scope.version": "meter_version",
                        "instrumentation_scope.schema_url": "meter_schema_vrl",
                    }
                )
            )
        )

    def test_view_name(self):
        with self.assertRaises(Exception):
            View(name="name", instrument_name="instrument_name*")
python-opentelemetry-1.39.1/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
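# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test suite): the matching rules
# TestView above exercises are additive -- a View matches an instrument only
# if *every* supplied criterion matches. At least one criterion is required,
# and a renaming View may not use a wildcard instrument name. Argument values
# are illustrative.
# ---------------------------------------------------------------------------
from opentelemetry.sdk.metrics.view import View

view = View(
    instrument_name="http.server.duration",  # exact instrument name...
    meter_name="my.meter",                   # ...AND the emitting meter
    meter_version="1.2.3",
)

try:
    View()  # no criteria at all
except Exception as error:
    print(error)

try:
    View(name="renamed", instrument_name="http.*")  # wildcard + rename
except Exception as error:
    print(error)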
# pylint: disable=protected-access from __future__ import annotations from time import time_ns from typing import Callable, Sequence, Type from unittest import TestCase from unittest.mock import MagicMock, Mock, patch from opentelemetry.context import Context from opentelemetry.sdk.metrics._internal._view_instrument_match import ( _ViewInstrumentMatch, ) from opentelemetry.sdk.metrics._internal.aggregation import ( _Aggregation, _DropAggregation, _ExplicitBucketHistogramAggregation, _LastValueAggregation, ) from opentelemetry.sdk.metrics._internal.exemplar import ( AlignedHistogramBucketExemplarReservoir, ExemplarReservoirBuilder, SimpleFixedSizeExemplarReservoir, ) from opentelemetry.sdk.metrics._internal.instrument import _Counter, _Histogram from opentelemetry.sdk.metrics._internal.measurement import Measurement from opentelemetry.sdk.metrics._internal.sdk_configuration import ( SdkConfiguration, ) from opentelemetry.sdk.metrics._internal.view import _default_reservoir_factory from opentelemetry.sdk.metrics.export import AggregationTemporality from opentelemetry.sdk.metrics.view import ( DefaultAggregation, DropAggregation, LastValueAggregation, View, ) def generalized_reservoir_factory( size: int = 1, boundaries: Sequence[float] | None = None ) -> Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]: def factory( aggregation_type: Type[_Aggregation], ) -> ExemplarReservoirBuilder: if issubclass(aggregation_type, _ExplicitBucketHistogramAggregation): return lambda **kwargs: AlignedHistogramBucketExemplarReservoir( boundaries=boundaries or [], **{k: v for k, v in kwargs.items() if k != "boundaries"}, ) return lambda **kwargs: SimpleFixedSizeExemplarReservoir( size=size, **{k: v for k, v in kwargs.items() if k != "size"}, ) return factory class Test_ViewInstrumentMatch(TestCase): # pylint: disable=invalid-name @classmethod def setUpClass(cls): cls.mock_aggregation_factory = Mock() cls.mock_created_aggregation = ( cls.mock_aggregation_factory._create_aggregation() ) cls.mock_resource = Mock() cls.mock_instrumentation_scope = Mock() cls.sdk_configuration = SdkConfiguration( exemplar_filter=Mock(), resource=cls.mock_resource, metric_readers=[], views=[], ) def test_consume_measurement(self): instrument1 = Mock(name="instrument1") instrument1.instrumentation_scope = self.mock_instrumentation_scope view_instrument_match = _ViewInstrumentMatch( view=View( instrument_name="instrument1", name="name", aggregation=self.mock_aggregation_factory, attribute_keys={"a", "c"}, ), instrument=instrument1, instrument_class_aggregation=MagicMock( **{"__getitem__.return_value": DefaultAggregation()} ), ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), attributes={"c": "d", "f": "g"}, ) ) self.assertEqual( view_instrument_match._attributes_aggregation, {frozenset([("c", "d")]): self.mock_created_aggregation}, ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), attributes={"w": "x", "y": "z"}, ) ) self.assertEqual( view_instrument_match._attributes_aggregation, { frozenset(): self.mock_created_aggregation, frozenset([("c", "d")]): self.mock_created_aggregation, }, ) # None attribute_keys (default) will keep all attributes view_instrument_match = _ViewInstrumentMatch( view=View( instrument_name="instrument1", name="name", aggregation=self.mock_aggregation_factory, ), instrument=instrument1, instrument_class_aggregation=MagicMock( 
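# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test suite): the attribute_keys
# filtering these matcher tests encode, via the public API. Keys listed in
# attribute_keys survive; all other attributes are dropped before
# aggregation, so measurements that differ only in dropped attributes merge
# into one data point. Names are illustrative.
# ---------------------------------------------------------------------------
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.metrics.view import View

reader = InMemoryMetricReader()
provider = MeterProvider(
    metric_readers=[reader],
    views=[View(instrument_name="hits", attribute_keys={"route"})],
)
counter = provider.get_meter("sketch").create_counter("hits")
counter.add(1, {"route": "/a", "user": "alice"})
counter.add(1, {"route": "/a", "user": "bob"})  # merges with the point above

point = (
    reader.get_metrics_data()
    .resource_metrics[0]
    .scope_metrics[0]
    .metrics[0]
    .data.data_points[0]
)
assert dict(point.attributes) == {"route": "/a"} and point.value == 2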
**{"__getitem__.return_value": DefaultAggregation()} ), ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), attributes={"c": "d", "f": "g"}, ) ) self.assertEqual( view_instrument_match._attributes_aggregation, { frozenset( [("c", "d"), ("f", "g")] ): self.mock_created_aggregation }, ) # empty set attribute_keys will drop all labels and aggregate # everything together view_instrument_match = _ViewInstrumentMatch( view=View( instrument_name="instrument1", name="name", aggregation=self.mock_aggregation_factory, attribute_keys={}, ), instrument=instrument1, instrument_class_aggregation=MagicMock( **{"__getitem__.return_value": DefaultAggregation()} ), ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), attributes=None, ) ) self.assertEqual( view_instrument_match._attributes_aggregation, {frozenset({}): self.mock_created_aggregation}, ) # Test that a drop aggregation is handled in the same way as any # other aggregation. drop_aggregation = DropAggregation() view_instrument_match = _ViewInstrumentMatch( view=View( instrument_name="instrument1", name="name", aggregation=drop_aggregation, attribute_keys={}, ), instrument=instrument1, instrument_class_aggregation=MagicMock( **{"__getitem__.return_value": DefaultAggregation()} ), ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), attributes=None, ) ) self.assertIsInstance( view_instrument_match._attributes_aggregation[frozenset({})], _DropAggregation, ) def test_collect(self): instrument1 = _Counter( "instrument1", Mock(), Mock(), description="description", unit="unit", ) instrument1.instrumentation_scope = self.mock_instrumentation_scope view_instrument_match = _ViewInstrumentMatch( view=View( instrument_name="instrument1", name="name", aggregation=DefaultAggregation(), attribute_keys={"a", "c"}, ), instrument=instrument1, instrument_class_aggregation=MagicMock( **{"__getitem__.return_value": DefaultAggregation()} ), ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), context=Context(), attributes={"c": "d", "f": "g"}, ) ) number_data_points = view_instrument_match.collect( AggregationTemporality.CUMULATIVE, 0 ) number_data_points = list(number_data_points) self.assertEqual(len(number_data_points), 1) number_data_point = number_data_points[0] self.assertEqual(number_data_point.attributes, {"c": "d"}) self.assertEqual(number_data_point.value, 0) @patch( "opentelemetry.sdk.metrics._internal._view_instrument_match.time_ns", side_effect=[0, 1, 2], ) def test_collect_resets_start_time_unix_nano(self, mock_time_ns): instrument = Mock(name="instrument") instrument.instrumentation_scope = self.mock_instrumentation_scope view_instrument_match = _ViewInstrumentMatch( view=View( instrument_name="instrument", name="name", aggregation=self.mock_aggregation_factory, ), instrument=instrument, instrument_class_aggregation=MagicMock( **{"__getitem__.return_value": DefaultAggregation()} ), ) start_time_unix_nano = 0 self.assertEqual(mock_time_ns.call_count, 0) # +1 call to _create_aggregation view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=instrument, attributes={"foo": "bar0"}, context=Context(), ) ) view_instrument_match._view._aggregation._create_aggregation.assert_called_with( 
instrument, {"foo": "bar0"}, _default_reservoir_factory, start_time_unix_nano, ) collection_start_time_unix_nano = time_ns() collected_data_points = view_instrument_match.collect( AggregationTemporality.CUMULATIVE, collection_start_time_unix_nano ) self.assertIsNotNone(collected_data_points) self.assertEqual(len(collected_data_points), 1) # +1 call to _create_aggregation view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=instrument, attributes={"foo": "bar1"}, context=Context(), ) ) view_instrument_match._view._aggregation._create_aggregation.assert_called_with( instrument, {"foo": "bar1"}, _default_reservoir_factory, 1 ) collection_start_time_unix_nano = time_ns() collected_data_points = view_instrument_match.collect( AggregationTemporality.CUMULATIVE, collection_start_time_unix_nano ) self.assertIsNotNone(collected_data_points) self.assertEqual(len(collected_data_points), 2) collected_data_points = view_instrument_match.collect( AggregationTemporality.CUMULATIVE, collection_start_time_unix_nano ) # +1 call to create_aggregation view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=instrument, attributes={"foo": "bar"}, context=Context(), ) ) view_instrument_match._view._aggregation._create_aggregation.assert_called_with( instrument, {"foo": "bar"}, _default_reservoir_factory, 2 ) # No new calls to _create_aggregation because attributes remain the same view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=instrument, attributes={"foo": "bar"}, context=Context(), ) ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=instrument, attributes={"foo": "bar"}, context=Context(), ) ) # In total we have 5 calls for _create_aggregation # 1 from the _ViewInstrumentMatch initialization and 4 # from the consume_measurement calls with different attributes self.assertEqual( view_instrument_match._view._aggregation._create_aggregation.call_count, 5, ) def test_data_point_check(self): instrument1 = _Counter( "instrument1", Mock(), Mock(), description="description", unit="unit", ) instrument1.instrumentation_scope = self.mock_instrumentation_scope view_instrument_match = _ViewInstrumentMatch( view=View( instrument_name="instrument1", name="name", aggregation=DefaultAggregation(), ), instrument=instrument1, instrument_class_aggregation=MagicMock( **{ "__getitem__.return_value": Mock( **{ "_create_aggregation.return_value": Mock( **{ "collect.side_effect": [ Mock(), Mock(), None, Mock(), ] } ) } ) } ), ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), context=Context(), attributes={"c": "d", "f": "g"}, ) ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), context=Context(), attributes={"h": "i", "j": "k"}, ) ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), context=Context(), attributes={"l": "m", "n": "o"}, ) ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), context=Context(), attributes={"p": "q", "r": "s"}, ) ) result = view_instrument_match.collect( AggregationTemporality.CUMULATIVE, 0 ) self.assertEqual(len(list(result)), 3) def test_setting_aggregation(self): instrument1 = _Counter( 
name="instrument1", instrumentation_scope=Mock(), measurement_consumer=Mock(), description="description", unit="unit", ) instrument1.instrumentation_scope = self.mock_instrumentation_scope view_instrument_match = _ViewInstrumentMatch( view=View( instrument_name="instrument1", name="name", aggregation=DefaultAggregation(), attribute_keys={"a", "c"}, ), instrument=instrument1, instrument_class_aggregation={_Counter: LastValueAggregation()}, ) view_instrument_match.consume_measurement( Measurement( value=0, time_unix_nano=time_ns(), instrument=Mock(name="instrument1"), context=Context(), attributes={"c": "d", "f": "g"}, ) ) self.assertIsInstance( view_instrument_match._attributes_aggregation[ frozenset({("c", "d")}) ], _LastValueAggregation, ) class TestSimpleFixedSizeExemplarReservoir(TestCase): def test_consume_measurement_with_custom_reservoir_factory(self): simple_fixed_size_factory = generalized_reservoir_factory(size=10) # Create an instance of _Counter instrument1 = _Counter( name="instrument1", instrumentation_scope=None, measurement_consumer=None, description="description", unit="unit", ) view_instrument_match = _ViewInstrumentMatch( view=View( instrument_name="instrument1", name="name", aggregation=DefaultAggregation(), exemplar_reservoir_factory=simple_fixed_size_factory, ), instrument=instrument1, instrument_class_aggregation={_Counter: DefaultAggregation()}, ) # Consume measurements with the same attributes to ensure aggregation view_instrument_match.consume_measurement( Measurement( value=2.0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), attributes={"attribute1": "value1"}, ) ) view_instrument_match.consume_measurement( Measurement( value=4.0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), attributes={"attribute2": "value2"}, ) ) view_instrument_match.consume_measurement( Measurement( value=5.0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), attributes={"attribute2": "value2"}, ) ) data_points = list( view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) ) # Ensure only one data point is collected self.assertEqual(len(data_points), 2) # Verify that exemplars have been correctly stored and collected self.assertEqual(len(data_points[0].exemplars), 1) self.assertEqual(len(data_points[1].exemplars), 2) self.assertEqual(data_points[0].exemplars[0].value, 2.0) self.assertEqual(data_points[1].exemplars[0].value, 4.0) self.assertEqual(data_points[1].exemplars[1].value, 5.0) def test_consume_measurement_with_exemplars(self): # Create an instance of _Counter instrument1 = _Counter( name="instrument1", instrumentation_scope=None, # No mock, set to None or actual scope if available measurement_consumer=None, # No mock, set to None or actual consumer if available description="description", unit="unit", ) view_instrument_match = _ViewInstrumentMatch( view=View( instrument_name="instrument1", name="name", aggregation=DefaultAggregation(), ), instrument=instrument1, instrument_class_aggregation={_Counter: DefaultAggregation()}, ) # Consume measurements with the same attributes to ensure aggregation view_instrument_match.consume_measurement( Measurement( value=4.0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), attributes={"attribute2": "value2"}, ) ) view_instrument_match.consume_measurement( Measurement( value=5.0, time_unix_nano=time_ns(), instrument=instrument1, context=Context(), attributes={"attribute2": "value2"}, ) ) # Collect the data points data_points = list( 
            view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0)
        )
        # Ensure only one data point is collected
        self.assertEqual(len(data_points), 1)
        # Verify that exemplars have been correctly stored and collected
        # As the default reservoir has only one bucket, it will retain
        # either one of the measurements based on random selection
        self.assertEqual(len(data_points[0].exemplars), 1)
        self.assertIn(data_points[0].exemplars[0].value, [4.0, 5.0])

    def test_consume_measurement_with_exemplars_and_view_attributes_filter(
        self,
    ):
        value = 22
        # Create an instance of _Counter
        instrument1 = _Counter(
            name="instrument1",
            instrumentation_scope=None,  # No mock, set to None or actual scope if available
            measurement_consumer=None,  # No mock, set to None or actual consumer if available
        )
        view_instrument_match = _ViewInstrumentMatch(
            view=View(
                instrument_name="instrument1",
                name="name",
                attribute_keys={"X", "Y"},
            ),
            instrument=instrument1,
            instrument_class_aggregation={_Counter: DefaultAggregation()},
        )
        view_instrument_match.consume_measurement(
            Measurement(
                value=value,
                time_unix_nano=time_ns(),
                instrument=instrument1,
                context=Context(),
                attributes={"X": "x-value", "Y": "y-value", "Z": "z-value"},
            )
        )
        # Collect the data points
        data_points = list(
            view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0)
        )
        # Ensure only one data point is collected
        self.assertEqual(len(data_points), 1)
        # Verify that exemplars have been correctly stored and collected
        self.assertEqual(len(data_points[0].exemplars), 1)
        # Check the exemplar has the dropped attribute
        exemplar = list(data_points[0].exemplars)[0]
        self.assertEqual(exemplar.value, value)
        self.assertDictEqual(exemplar.filtered_attributes, {"Z": "z-value"})


class TestAlignedHistogramBucketExemplarReservoir(TestCase):
    def test_consume_measurement_with_custom_reservoir_factory(self):
        # Custom factory for AlignedHistogramBucketExemplarReservoir with specific boundaries
        histogram_reservoir_factory = generalized_reservoir_factory(
            boundaries=[0, 5, 10, 25]
        )
        # Create an instance of _Histogram
        instrument1 = _Histogram(
            name="instrument1",
            instrumentation_scope=None,
            measurement_consumer=None,
            description="description",
            unit="unit",
        )
        view_instrument_match = _ViewInstrumentMatch(
            view=View(
                instrument_name="instrument1",
                name="name",
                aggregation=DefaultAggregation(),
                exemplar_reservoir_factory=histogram_reservoir_factory,
            ),
            instrument=instrument1,
            instrument_class_aggregation={_Histogram: DefaultAggregation()},
        )
        # Consume measurements with different values to ensure they are placed in the correct buckets
        view_instrument_match.consume_measurement(
            Measurement(
                value=2.0,  # Should go into the first bucket (0 to 5)
                time_unix_nano=time_ns(),
                instrument=instrument1,
                context=Context(),
                attributes={"attribute1": "value1"},
            )
        )
        view_instrument_match.consume_measurement(
            Measurement(
                value=7.0,  # Should go into the second bucket (5 to 10)
                time_unix_nano=time_ns(),
                instrument=instrument1,
                context=Context(),
                attributes={"attribute2": "value2"},
            )
        )
        view_instrument_match.consume_measurement(
            Measurement(
                value=8.0,  # Should go into the second bucket (5 to 10)
                time_unix_nano=time_ns(),
                instrument=instrument1,
                context=Context(),
                attributes={"attribute2": "value2"},
            )
        )
        view_instrument_match.consume_measurement(
            Measurement(
                value=15.0,  # Should go into the third bucket (10 to 25)
                time_unix_nano=time_ns(),
                instrument=instrument1,
                context=Context(),
                attributes={"attribute3": "value3"},
            )
        )
        # Collect the data points
        data_points = list(
view_instrument_match.collect(AggregationTemporality.CUMULATIVE, 0) ) # Ensure three data points are collected, one for each bucket self.assertEqual(len(data_points), 3) # Verify that exemplars have been correctly stored and collected in their respective buckets self.assertEqual(len(data_points[0].exemplars), 1) self.assertEqual(len(data_points[1].exemplars), 1) self.assertEqual(len(data_points[2].exemplars), 1) self.assertEqual( data_points[0].exemplars[0].value, 2.0 ) # First bucket self.assertEqual( data_points[1].exemplars[0].value, 8.0 ) # Second bucket self.assertEqual( data_points[2].exemplars[0].value, 15.0 ) # Third bucket python-opentelemetry-1.39.1/opentelemetry-sdk/tests/resources/000077500000000000000000000000001511654350100246505ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/resources/__init__.py000066400000000000000000000000001511654350100267470ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/resources/test_resources.py000066400000000000000000000670171511654350100303060ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import unittest import uuid from concurrent.futures import TimeoutError from logging import ERROR, WARNING from os import environ from unittest.mock import Mock, patch from urllib import parse from opentelemetry.sdk.environment_variables import ( OTEL_EXPERIMENTAL_RESOURCE_DETECTORS, ) from opentelemetry.sdk.resources import ( _DEFAULT_RESOURCE, _EMPTY_RESOURCE, _OPENTELEMETRY_SDK_VERSION, HOST_ARCH, HOST_NAME, OS_TYPE, OS_VERSION, OTEL_RESOURCE_ATTRIBUTES, OTEL_SERVICE_NAME, PROCESS_COMMAND, PROCESS_COMMAND_ARGS, PROCESS_COMMAND_LINE, PROCESS_EXECUTABLE_NAME, PROCESS_EXECUTABLE_PATH, PROCESS_OWNER, PROCESS_PARENT_PID, PROCESS_PID, PROCESS_RUNTIME_DESCRIPTION, PROCESS_RUNTIME_NAME, PROCESS_RUNTIME_VERSION, SERVICE_NAME, TELEMETRY_SDK_LANGUAGE, TELEMETRY_SDK_NAME, TELEMETRY_SDK_VERSION, OsResourceDetector, OTELResourceDetector, ProcessResourceDetector, Resource, ResourceDetector, _HostResourceDetector, get_aggregated_resources, ) try: import psutil except ImportError: psutil = None class TestResources(unittest.TestCase): def setUp(self) -> None: environ[OTEL_RESOURCE_ATTRIBUTES] = "" def tearDown(self) -> None: environ.pop(OTEL_RESOURCE_ATTRIBUTES) def test_create(self): attributes = { "service": "ui", "version": 1, "has_bugs": True, "cost": 112.12, } expected_attributes = { "service": "ui", "version": 1, "has_bugs": True, "cost": 112.12, TELEMETRY_SDK_NAME: "opentelemetry", TELEMETRY_SDK_LANGUAGE: "python", TELEMETRY_SDK_VERSION: _OPENTELEMETRY_SDK_VERSION, SERVICE_NAME: "unknown_service", } resource = Resource.create(attributes) self.assertIsInstance(resource, Resource) self.assertEqual(resource.attributes, expected_attributes) self.assertEqual(resource.schema_url, "") schema_url = "https://opentelemetry.io/schemas/1.3.0" resource = Resource.create(attributes, schema_url) self.assertIsInstance(resource, Resource) 
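        # An explicitly supplied schema_url should be carried through
        # unchanged on the created Resource, while the merged attributes stay
        # the same as in the no-schema_url case above.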
self.assertEqual(resource.attributes, expected_attributes) self.assertEqual(resource.schema_url, schema_url) environ[OTEL_RESOURCE_ATTRIBUTES] = "key=value" resource = Resource.create(attributes) self.assertIsInstance(resource, Resource) expected_with_envar = expected_attributes.copy() expected_with_envar["key"] = "value" self.assertEqual(resource.attributes, expected_with_envar) environ[OTEL_RESOURCE_ATTRIBUTES] = "" resource = Resource.get_empty() self.assertEqual(resource, _EMPTY_RESOURCE) resource = Resource.create(None) self.assertEqual( resource, _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ), ) self.assertEqual(resource.schema_url, "") resource = Resource.create(None, None) self.assertEqual( resource, _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ), ) self.assertEqual(resource.schema_url, "") resource = Resource.create({}) self.assertEqual( resource, _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ), ) self.assertEqual(resource.schema_url, "") resource = Resource.create({}, None) self.assertEqual( resource, _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ), ) self.assertEqual(resource.schema_url, "") def test_resource_merge(self): left = Resource({"service": "ui"}) right = Resource({"host": "service-host"}) self.assertEqual( left.merge(right), Resource({"service": "ui", "host": "service-host"}), ) schema_urls = ( "https://opentelemetry.io/schemas/1.2.0", "https://opentelemetry.io/schemas/1.3.0", ) left = Resource.create({}, None) right = Resource.create({}, None) self.assertEqual(left.merge(right).schema_url, "") left = Resource.create({}, None) right = Resource.create({}, schema_urls[0]) self.assertEqual(left.merge(right).schema_url, schema_urls[0]) left = Resource.create({}, schema_urls[0]) right = Resource.create({}, None) self.assertEqual(left.merge(right).schema_url, schema_urls[0]) left = Resource.create({}, schema_urls[0]) right = Resource.create({}, schema_urls[0]) self.assertEqual(left.merge(right).schema_url, schema_urls[0]) left = Resource.create({}, schema_urls[0]) right = Resource.create({}, schema_urls[1]) with self.assertLogs(level=ERROR) as log_entry: self.assertEqual(left.merge(right), left) self.assertIn(schema_urls[0], log_entry.output[0]) self.assertIn(schema_urls[1], log_entry.output[0]) def test_resource_merge_empty_string(self): """Verify Resource.merge behavior with the empty string. Attributes from the source Resource take precedence, with the exception of the empty string. 
""" left = Resource({"service": "ui", "host": ""}) right = Resource({"host": "service-host", "service": "not-ui"}) self.assertEqual( left.merge(right), Resource({"service": "not-ui", "host": "service-host"}), ) def test_immutability(self): attributes = { "service": "ui", "version": 1, "has_bugs": True, "cost": 112.12, } default_attributes = { TELEMETRY_SDK_NAME: "opentelemetry", TELEMETRY_SDK_LANGUAGE: "python", TELEMETRY_SDK_VERSION: _OPENTELEMETRY_SDK_VERSION, SERVICE_NAME: "unknown_service", } attributes_copy = attributes.copy() attributes_copy.update(default_attributes) resource = Resource.create(attributes) self.assertEqual(resource.attributes, attributes_copy) with self.assertRaises(TypeError): resource.attributes["has_bugs"] = False self.assertEqual(resource.attributes, attributes_copy) attributes["cost"] = 999.91 self.assertEqual(resource.attributes, attributes_copy) with self.assertRaises(AttributeError): resource.schema_url = "bug" self.assertEqual(resource.schema_url, "") def test_service_name_using_process_name(self): resource = Resource.create({PROCESS_EXECUTABLE_NAME: "test"}) self.assertEqual( resource.attributes.get(SERVICE_NAME), "unknown_service:test", ) def test_invalid_resource_attribute_values(self): with self.assertLogs(level=WARNING): resource = Resource( { SERVICE_NAME: "test", "non-primitive-data-type": {}, "invalid-byte-type-attribute": ( b"\xd8\xe1\xb7\xeb\xa8\xe5 \xd2\xb7\xe1" ), "": "empty-key-value", None: "null-key-value", "another-non-primitive": uuid.uuid4(), } ) self.assertEqual( resource.attributes, { SERVICE_NAME: "test", }, ) self.assertEqual(len(resource.attributes), 1) def test_aggregated_resources_no_detectors(self): aggregated_resources = get_aggregated_resources([]) self.assertEqual( aggregated_resources, _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ), ) def test_aggregated_resources_with_default_destroying_static_resource( self, ): static_resource = Resource({"static_key": "static_value"}) self.assertEqual( get_aggregated_resources([], initial_resource=static_resource), static_resource, ) resource_detector = Mock(spec=ResourceDetector) resource_detector.detect.return_value = Resource( {"static_key": "try_to_overwrite_existing_value", "key": "value"} ) self.assertEqual( get_aggregated_resources( [resource_detector], initial_resource=static_resource ), Resource( { "static_key": "try_to_overwrite_existing_value", "key": "value", } ), ) def test_aggregated_resources_multiple_detectors(self): resource_detector1 = Mock(spec=ResourceDetector) resource_detector1.detect.return_value = Resource({"key1": "value1"}) resource_detector2 = Mock(spec=ResourceDetector) resource_detector2.detect.return_value = Resource( {"key2": "value2", "key3": "value3"} ) resource_detector3 = Mock(spec=ResourceDetector) resource_detector3.detect.return_value = Resource( { "key2": "try_to_overwrite_existing_value", "key3": "try_to_overwrite_existing_value", "key4": "value4", } ) self.assertEqual( get_aggregated_resources( [resource_detector1, resource_detector2, resource_detector3] ), _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ).merge( Resource( { "key1": "value1", "key2": "try_to_overwrite_existing_value", "key3": "try_to_overwrite_existing_value", "key4": "value4", } ) ), ) def test_aggregated_resources_different_schema_urls(self): resource_detector1 = Mock(spec=ResourceDetector) resource_detector1.detect.return_value = Resource( {"key1": "value1"}, "" ) resource_detector2 = Mock(spec=ResourceDetector) 
resource_detector2.detect.return_value = Resource( {"key2": "value2", "key3": "value3"}, "url1" ) resource_detector3 = Mock(spec=ResourceDetector) resource_detector3.detect.return_value = Resource( { "key2": "try_to_overwrite_existing_value", "key3": "try_to_overwrite_existing_value", "key4": "value4", }, "url2", ) resource_detector4 = Mock(spec=ResourceDetector) resource_detector4.detect.return_value = Resource( { "key2": "try_to_overwrite_existing_value", "key3": "try_to_overwrite_existing_value", "key4": "value4", }, "url1", ) self.assertEqual( get_aggregated_resources([resource_detector1, resource_detector2]), _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ).merge( Resource( {"key1": "value1", "key2": "value2", "key3": "value3"}, "url1", ) ), ) with self.assertLogs(level=ERROR) as log_entry: self.assertEqual( get_aggregated_resources( [resource_detector2, resource_detector3] ), _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ).merge( Resource({"key2": "value2", "key3": "value3"}, "url1") ), ) self.assertIn("url1", log_entry.output[0]) self.assertIn("url2", log_entry.output[0]) with self.assertLogs(level=ERROR): self.assertEqual( get_aggregated_resources( [ resource_detector2, resource_detector3, resource_detector4, resource_detector1, ] ), _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ).merge( Resource( { "key1": "value1", "key2": "try_to_overwrite_existing_value", "key3": "try_to_overwrite_existing_value", "key4": "value4", }, "url1", ) ), ) self.assertIn("url1", log_entry.output[0]) self.assertIn("url2", log_entry.output[0]) def test_resource_detector_ignore_error(self): resource_detector = Mock(spec=ResourceDetector) resource_detector.detect.side_effect = Exception() resource_detector.raise_on_error = False with self.assertLogs(level=WARNING): self.assertEqual( get_aggregated_resources([resource_detector]), _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ), ) def test_resource_detector_raise_error(self): resource_detector = Mock(spec=ResourceDetector) resource_detector.detect.side_effect = Exception() resource_detector.raise_on_error = True self.assertRaises( Exception, get_aggregated_resources, [resource_detector] ) @patch("opentelemetry.sdk.resources.logger") def test_resource_detector_timeout(self, mock_logger): resource_detector = Mock(spec=ResourceDetector) resource_detector.detect.side_effect = TimeoutError() resource_detector.raise_on_error = False self.assertEqual( get_aggregated_resources([resource_detector]), _DEFAULT_RESOURCE.merge( Resource({SERVICE_NAME: "unknown_service"}, "") ), ) mock_logger.warning.assert_called_with( "Detector %s took longer than %s seconds, skipping", resource_detector, 5, ) @patch.dict( environ, {"OTEL_RESOURCE_ATTRIBUTES": "key1=env_value1,key2=env_value2"}, ) def test_env_priority(self): resource_env = Resource.create() self.assertEqual(resource_env.attributes["key1"], "env_value1") self.assertEqual(resource_env.attributes["key2"], "env_value2") resource_env_override = Resource.create( {"key1": "value1", "key2": "value2"} ) self.assertEqual(resource_env_override.attributes["key1"], "value1") self.assertEqual(resource_env_override.attributes["key2"], "value2") @patch.dict( environ, { OTEL_SERVICE_NAME: "test-srv-name", OTEL_RESOURCE_ATTRIBUTES: "service.name=svc-name-from-resource", }, ) def test_service_name_env(self): resource = Resource.create() self.assertEqual(resource.attributes["service.name"], "test-srv-name") resource = 
Resource.create({"service.name": "from-code"}) self.assertEqual(resource.attributes["service.name"], "from-code") class TestOTELResourceDetector(unittest.TestCase): def setUp(self) -> None: environ[OTEL_RESOURCE_ATTRIBUTES] = "" def tearDown(self) -> None: environ.pop(OTEL_RESOURCE_ATTRIBUTES) def test_empty(self): detector = OTELResourceDetector() environ[OTEL_RESOURCE_ATTRIBUTES] = "" self.assertEqual(detector.detect(), Resource.get_empty()) def test_one(self): detector = OTELResourceDetector() environ[OTEL_RESOURCE_ATTRIBUTES] = "k=v" self.assertEqual(detector.detect(), Resource({"k": "v"})) def test_one_with_whitespace(self): detector = OTELResourceDetector() environ[OTEL_RESOURCE_ATTRIBUTES] = " k = v " self.assertEqual(detector.detect(), Resource({"k": "v"})) def test_multiple(self): detector = OTELResourceDetector() environ[OTEL_RESOURCE_ATTRIBUTES] = "k=v,k2=v2" self.assertEqual(detector.detect(), Resource({"k": "v", "k2": "v2"})) def test_multiple_with_whitespace(self): detector = OTELResourceDetector() environ[OTEL_RESOURCE_ATTRIBUTES] = " k = v , k2 = v2 " self.assertEqual(detector.detect(), Resource({"k": "v", "k2": "v2"})) def test_invalid_key_value_pairs(self): detector = OTELResourceDetector() environ[OTEL_RESOURCE_ATTRIBUTES] = "k=v,k2=v2,invalid,,foo=bar=baz," with self.assertLogs(level=WARNING): self.assertEqual( detector.detect(), Resource({"k": "v", "k2": "v2", "foo": "bar=baz"}), ) def test_multiple_with_url_decode(self): detector = OTELResourceDetector() environ[OTEL_RESOURCE_ATTRIBUTES] = ( "key=value%20test%0A, key2=value+%202" ) self.assertEqual( detector.detect(), Resource({"key": "value test\n", "key2": "value+ 2"}), ) self.assertEqual( detector.detect(), Resource( { "key": parse.unquote("value%20test%0A"), "key2": parse.unquote("value+%202"), } ), ) @patch.dict( environ, {OTEL_SERVICE_NAME: "test-srv-name"}, ) def test_service_name_env(self): detector = OTELResourceDetector() self.assertEqual( detector.detect(), Resource({"service.name": "test-srv-name"}), ) @patch.dict( environ, { OTEL_SERVICE_NAME: "from-service-name", OTEL_RESOURCE_ATTRIBUTES: "service.name=from-resource-attrs", }, ) def test_service_name_env_precedence(self): detector = OTELResourceDetector() self.assertEqual( detector.detect(), Resource({"service.name": "from-service-name"}), ) @patch( "sys.argv", ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"], ) def test_process_detector(self): initial_resource = Resource({"foo": "bar"}) aggregated_resource = get_aggregated_resources( [ProcessResourceDetector()], initial_resource ) self.assertIn( PROCESS_RUNTIME_NAME, aggregated_resource.attributes.keys(), ) self.assertIn( PROCESS_RUNTIME_DESCRIPTION, aggregated_resource.attributes.keys(), ) self.assertIn( PROCESS_RUNTIME_VERSION, aggregated_resource.attributes.keys(), ) self.assertEqual( aggregated_resource.attributes[PROCESS_PID], os.getpid() ) if hasattr(os, "getppid"): self.assertEqual( aggregated_resource.attributes[PROCESS_PARENT_PID], os.getppid(), ) if psutil is not None: self.assertEqual( aggregated_resource.attributes[PROCESS_OWNER], psutil.Process().username(), ) self.assertEqual( aggregated_resource.attributes[PROCESS_EXECUTABLE_NAME], sys.executable, ) self.assertEqual( aggregated_resource.attributes[PROCESS_EXECUTABLE_PATH], os.path.dirname(sys.executable), ) self.assertEqual( aggregated_resource.attributes[PROCESS_COMMAND], sys.argv[0] ) self.assertEqual( aggregated_resource.attributes[PROCESS_COMMAND_LINE], " ".join(sys.argv), ) self.assertEqual( 
aggregated_resource.attributes[PROCESS_COMMAND_ARGS], tuple(sys.argv), ) def test_resource_detector_entry_points_default(self): resource = Resource({}).create() self.assertEqual( resource.attributes["telemetry.sdk.language"], "python" ) self.assertEqual( resource.attributes["telemetry.sdk.name"], "opentelemetry" ) self.assertEqual( resource.attributes["service.name"], "unknown_service" ) self.assertEqual(resource.schema_url, "") resource = Resource({}).create({"a": "b", "c": "d"}) self.assertEqual( resource.attributes["telemetry.sdk.language"], "python" ) self.assertEqual( resource.attributes["telemetry.sdk.name"], "opentelemetry" ) self.assertEqual( resource.attributes["service.name"], "unknown_service" ) self.assertEqual(resource.attributes["a"], "b") self.assertEqual(resource.attributes["c"], "d") self.assertEqual(resource.schema_url, "") @patch.dict( environ, {OTEL_EXPERIMENTAL_RESOURCE_DETECTORS: "mock"}, clear=True ) @patch( "opentelemetry.sdk.resources.entry_points", Mock( return_value=[ Mock( **{ "load.return_value": Mock( return_value=Mock( **{"detect.return_value": Resource({"a": "b"})} ) ) } ) ] ), ) def test_resource_detector_entry_points_non_default(self): resource = Resource({}).create() self.assertEqual( resource.attributes["telemetry.sdk.language"], "python" ) self.assertEqual( resource.attributes["telemetry.sdk.name"], "opentelemetry" ) self.assertEqual( resource.attributes["service.name"], "unknown_service" ) self.assertEqual(resource.attributes["a"], "b") self.assertEqual(resource.schema_url, "") @patch.dict( environ, {OTEL_EXPERIMENTAL_RESOURCE_DETECTORS: ""}, clear=True ) def test_resource_detector_entry_points_empty(self): resource = Resource({}).create() self.assertEqual( resource.attributes["telemetry.sdk.language"], "python" ) @patch.dict( environ, {OTEL_EXPERIMENTAL_RESOURCE_DETECTORS: "os"}, clear=True ) def test_resource_detector_entry_points_os(self): resource = Resource({}).create() self.assertIn(OS_TYPE, resource.attributes) self.assertIn(OS_VERSION, resource.attributes) def test_resource_detector_entry_points_otel(self): """ Test that OTELResourceDetector-resource-generated attributes are always being added. 
""" with patch.dict( environ, {OTEL_RESOURCE_ATTRIBUTES: "a=b,c=d"}, clear=True ): resource = Resource({}).create() self.assertEqual( resource.attributes["telemetry.sdk.language"], "python" ) self.assertEqual( resource.attributes["telemetry.sdk.name"], "opentelemetry" ) self.assertEqual( resource.attributes["service.name"], "unknown_service" ) self.assertEqual(resource.attributes["a"], "b") self.assertEqual(resource.attributes["c"], "d") self.assertEqual(resource.schema_url, "") with patch.dict( environ, { OTEL_RESOURCE_ATTRIBUTES: "a=b,c=d", OTEL_EXPERIMENTAL_RESOURCE_DETECTORS: "process", }, clear=True, ): resource = Resource({}).create() self.assertEqual( resource.attributes["telemetry.sdk.language"], "python" ) self.assertEqual( resource.attributes["telemetry.sdk.name"], "opentelemetry" ) self.assertEqual( resource.attributes["service.name"], "unknown_service:" + resource.attributes["process.executable.name"], ) self.assertEqual(resource.attributes["a"], "b") self.assertEqual(resource.attributes["c"], "d") self.assertIn(PROCESS_RUNTIME_NAME, resource.attributes.keys()) self.assertIn( PROCESS_RUNTIME_DESCRIPTION, resource.attributes.keys() ) self.assertIn(PROCESS_RUNTIME_VERSION, resource.attributes.keys()) self.assertEqual(resource.schema_url, "") @patch("platform.system", lambda: "Linux") @patch("platform.release", lambda: "666.5.0-35-generic") def test_os_detector_linux(self): resource = get_aggregated_resources( [OsResourceDetector()], Resource({}), ) self.assertEqual(resource.attributes[OS_TYPE], "linux") self.assertEqual(resource.attributes[OS_VERSION], "666.5.0-35-generic") @patch("platform.system", lambda: "Windows") @patch("platform.version", lambda: "10.0.666") def test_os_detector_windows(self): resource = get_aggregated_resources( [OsResourceDetector()], Resource({}), ) self.assertEqual(resource.attributes[OS_TYPE], "windows") self.assertEqual(resource.attributes[OS_VERSION], "10.0.666") @patch("platform.system", lambda: "SunOS") @patch("platform.version", lambda: "666.4.0.15.0") def test_os_detector_solaris(self): resource = get_aggregated_resources( [OsResourceDetector()], Resource({}), ) self.assertEqual(resource.attributes[OS_TYPE], "solaris") self.assertEqual(resource.attributes[OS_VERSION], "666.4.0.15.0") class TestHostResourceDetector(unittest.TestCase): @patch("socket.gethostname", lambda: "foo") @patch("platform.machine", lambda: "AMD64") def test_host_resource_detector(self): resource = get_aggregated_resources( [_HostResourceDetector()], Resource({}), ) self.assertEqual(resource.attributes[HOST_NAME], "foo") self.assertEqual(resource.attributes[HOST_ARCH], "AMD64") @patch.dict( environ, {OTEL_EXPERIMENTAL_RESOURCE_DETECTORS: "host"}, clear=True ) def test_resource_detector_entry_points_host(self): resource = Resource({}).create() self.assertIn(HOST_NAME, resource.attributes) self.assertIn(HOST_ARCH, resource.attributes) @patch.dict( environ, {OTEL_EXPERIMENTAL_RESOURCE_DETECTORS: "doesnotexist,host"}, clear=True, ) def test_resource_detector_entry_points_tolerate_missing_detector(self): resource = Resource({}).create() self.assertEqual( resource.attributes["telemetry.sdk.language"], "python" ) self.assertIn(HOST_NAME, resource.attributes) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/shared_internal/000077500000000000000000000000001511654350100260005ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/shared_internal/__init__.py000066400000000000000000000011101511654350100301020ustar00rootroot00000000000000# Copyright The 
OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python-opentelemetry-1.39.1/opentelemetry-sdk/tests/shared_internal/test_batch_processor.py000066400000000000000000000226031511654350100325740ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=protected-access
import gc
import logging
import multiprocessing
import os
import threading
import time
import unittest
import weakref
from platform import system
from typing import Any
from unittest.mock import Mock

import pytest

from opentelemetry._logs import (
    LogRecord,
)
from opentelemetry.sdk._logs import (
    ReadWriteLogRecord,
)
from opentelemetry.sdk._logs.export import (
    BatchLogRecordProcessor,
)
from opentelemetry.sdk._shared_internal import (
    DuplicateFilter,
)
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.sdk.util.instrumentation import InstrumentationScope

EMPTY_LOG = ReadWriteLogRecord(
    log_record=LogRecord(),
    instrumentation_scope=InstrumentationScope("example", "example"),
)

BASIC_SPAN = ReadableSpan(
    "MySpan",
    instrumentation_scope=InstrumentationScope("example", "example"),
)

if system() != "Windows":
    multiprocessing.set_start_method("fork")


class MockExporterForTesting:
    def __init__(self, export_sleep: int):
        self.num_export_calls = 0
        self.export_sleep = export_sleep
        self._shutdown = False
        self.sleep_interrupted = False
        self.export_sleep_event = threading.Event()

    def export(self, _: list[Any]):
        self.num_export_calls += 1
        if self._shutdown:
            raise ValueError("Cannot export, already shutdown")
        sleep_interrupted = self.export_sleep_event.wait(self.export_sleep)
        if sleep_interrupted:
            self.sleep_interrupted = True
            raise ValueError("Did not get to finish!")

    def shutdown(self):
        # Force export to finish sleeping.
        self._shutdown = True
        self.export_sleep_event.set()


# BatchLogRecordProcessor/BatchSpanProcessor initialize and use BatchProcessor.
# Important: make sure to call .shutdown() before the end of the test,
# otherwise the worker thread will continue to run after the end of the test.
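# Each parametrized case below pairs a processor class with a matching piece
# of telemetry (EMPTY_LOG or BASIC_SPAN defined above) that the tests push
# through the shared internals via ._batch_processor.emit().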
@pytest.mark.parametrize( "batch_processor_class,telemetry", [(BatchLogRecordProcessor, EMPTY_LOG), (BatchSpanProcessor, BASIC_SPAN)], ) class TestBatchProcessor: # pylint: disable=no-self-use def test_telemetry_exported_once_batch_size_reached( self, batch_processor_class, telemetry ): exporter = Mock() batch_processor = batch_processor_class( exporter, max_queue_size=15, max_export_batch_size=15, # Will not reach this during the test, this sleep should be interrupted when batch size is reached. schedule_delay_millis=30000, export_timeout_millis=500, ) before_export = time.time_ns() for _ in range(15): batch_processor._batch_processor.emit(telemetry) # Wait a bit for the worker thread to wake up and call export. time.sleep(0.1) exporter.export.assert_called_once() after_export = time.time_ns() # Shows the worker's 30 second sleep was interrupted within a second. assert after_export - before_export < 1e9 batch_processor.shutdown() # pylint: disable=no-self-use def test_telemetry_exported_once_schedule_delay_reached( self, batch_processor_class, telemetry ): exporter = Mock() batch_processor = batch_processor_class( exporter, max_queue_size=15, max_export_batch_size=15, schedule_delay_millis=100, export_timeout_millis=500, ) batch_processor._batch_processor.emit(telemetry) time.sleep(0.2) exporter.export.assert_called_once_with([telemetry]) batch_processor.shutdown() def test_telemetry_flushed_before_shutdown_and_dropped_after_shutdown( self, batch_processor_class, telemetry ): exporter = Mock() batch_processor = batch_processor_class( exporter, # Neither of these thresholds should be hit before test ends. max_queue_size=15, max_export_batch_size=15, schedule_delay_millis=30000, export_timeout_millis=500, ) # This log should be flushed because it was written before shutdown. batch_processor._batch_processor.emit(telemetry) batch_processor.shutdown() exporter.export.assert_called_once_with([telemetry]) assert batch_processor._batch_processor._shutdown is True # This should not be flushed. batch_processor._batch_processor.emit(telemetry) exporter.export.assert_called_once() # pylint: disable=no-self-use def test_force_flush_flushes_telemetry( self, batch_processor_class, telemetry ): exporter = Mock() batch_processor = batch_processor_class( exporter, # Neither of these thresholds should be hit before test ends. max_queue_size=15, max_export_batch_size=15, schedule_delay_millis=30000, export_timeout_millis=500, ) for _ in range(10): batch_processor._batch_processor.emit(telemetry) batch_processor.force_flush() exporter.export.assert_called_once_with([telemetry for _ in range(10)]) batch_processor.shutdown() @unittest.skipUnless( hasattr(os, "fork"), "needs *nix", ) def test_batch_telemetry_record_processor_fork( self, batch_processor_class, telemetry ): exporter = Mock() batch_processor = batch_processor_class( exporter, max_queue_size=200, max_export_batch_size=10, schedule_delay_millis=30000, export_timeout_millis=500, ) # This telemetry should be flushed only from the parent process. # _at_fork_reinit should be called in the child process, to # clear the logs/spans in the child process. 
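        # Nine records is one short of max_export_batch_size (10 here), so
        # nothing should be exported before the fork; the parent's
        # force_flush at the end of the test exports them as a single batch.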
        for _ in range(9):
            batch_processor._batch_processor.emit(telemetry)

        def child(conn):
            for _ in range(100):
                batch_processor._batch_processor.emit(telemetry)
            batch_processor.force_flush()
            # Expect force flush to export 10 batches of max export batch size (10)
            conn.send(exporter.export.call_count == 10)
            conn.close()

        parent_conn, child_conn = multiprocessing.Pipe()
        process = multiprocessing.Process(target=child, args=(child_conn,))
        process.start()
        assert parent_conn.recv() is True
        process.join()
        batch_processor.force_flush()
        # Single export for the telemetry we emitted at the start of the test.
        assert exporter.export.call_count == 1
        batch_processor.shutdown()

    def test_record_processor_is_garbage_collected(
        self, batch_processor_class, telemetry
    ):
        exporter = Mock()
        processor = batch_processor_class(exporter)
        weak_ref = weakref.ref(processor)
        processor.shutdown()

        # When the processor is garbage collected
        del processor
        gc.collect()

        # Then the reference to the processor should no longer exist
        assert weak_ref() is None

    def test_shutdown_allows_1_export_to_finish(
        self, batch_processor_class, telemetry
    ):
        # This exporter throws an exception if its export sleep cannot finish.
        exporter = MockExporterForTesting(export_sleep=2)
        processor = batch_processor_class(
            exporter,
            max_queue_size=200,
            max_export_batch_size=1,
            schedule_delay_millis=30000,
        )
        # Max export batch size is 1, so 3 emit calls require 3 separate
        # export calls (each blocking for 2 seconds) to clear the queue.
        processor._batch_processor.emit(telemetry)
        processor._batch_processor.emit(telemetry)
        processor._batch_processor.emit(telemetry)
        before = time.time()
        processor._batch_processor.shutdown(timeout_millis=3000)
        # Shutdown does not kill the thread.
        assert processor._batch_processor._worker_thread.is_alive() is True
        after = time.time()
        assert after - before < 3.3
        # Thread will naturally finish after a little bit.
        time.sleep(0.1)
        assert processor._batch_processor._worker_thread.is_alive() is False
        # Expect the second call to be interrupted by shutdown, and the third call to never be made.
        assert exporter.sleep_interrupted is True
        assert 2 == exporter.num_export_calls


class TestCommonFuncs(unittest.TestCase):
    def test_duplicate_logs_filter_works(self):
        test_logger = logging.getLogger("testLogger")
        test_logger.addFilter(DuplicateFilter())
        with self.assertLogs("testLogger") as cm:
            test_logger.info("message")
            test_logger.info("message")
        self.assertEqual(len(cm.output), 1)
python-opentelemetry-1.39.1/opentelemetry-sdk/tests/test_configurator.py000066400000000000000000001272271511654350100267620ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
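# The tests below exercise opentelemetry.sdk._configuration: how exporter,
# sampler and id-generator choices are resolved from environment variables
# (e.g. OTEL_TRACES_SAMPLER, OTEL_PYTHON_ID_GENERATOR) and entry points, and
# how _init_tracing/_init_logging/_init_metrics wire the results into the
# global providers.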
# type: ignore # pylint: skip-file from __future__ import annotations import logging import logging.config from logging import WARNING, getLogger from os import environ from typing import Iterable, Optional, Sequence from unittest import TestCase, mock from unittest.mock import Mock, patch from pytest import raises from opentelemetry import trace from opentelemetry.context import Context from opentelemetry.environment_variables import OTEL_PYTHON_ID_GENERATOR from opentelemetry.sdk._configuration import ( _EXPORTER_OTLP, _EXPORTER_OTLP_PROTO_GRPC, _EXPORTER_OTLP_PROTO_HTTP, _get_exporter_names, _get_id_generator, _get_sampler, _import_config_components, _import_exporters, _import_id_generator, _import_sampler, _init_logging, _init_metrics, _init_tracing, _initialize_components, _OTelSDKConfigurator, ) from opentelemetry.sdk._logs import LoggingHandler from opentelemetry.sdk._logs._internal.export import LogRecordExporter from opentelemetry.sdk._logs.export import ConsoleLogRecordExporter from opentelemetry.sdk.environment_variables import ( OTEL_TRACES_SAMPLER, OTEL_TRACES_SAMPLER_ARG, ) from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( AggregationTemporality, ConsoleMetricExporter, Metric, MetricExporter, MetricReader, ) from opentelemetry.sdk.metrics.view import Aggregation from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.sdk.trace.export import ConsoleSpanExporter from opentelemetry.sdk.trace.id_generator import IdGenerator, RandomIdGenerator from opentelemetry.sdk.trace.sampling import ( ALWAYS_ON, Decision, ParentBased, Sampler, SamplingResult, TraceIdRatioBased, ) from opentelemetry.test.mock_test_classes import IterEntryPoint from opentelemetry.trace import Link, SpanKind from opentelemetry.trace.span import TraceState from opentelemetry.util.types import Attributes class Provider: def __init__(self, resource=None, sampler=None, id_generator=None): self.sampler = sampler self.id_generator = id_generator self.processor = None self.resource = resource or Resource.create({}) def add_span_processor(self, processor): self.processor = processor class DummyLoggerProvider: def __init__(self, resource=None): self.resource = resource self.processor = DummyLogRecordProcessor(DummyOTLPLogExporter()) def add_log_record_processor(self, processor): self.processor = processor def get_logger(self, name, *args, **kwargs): return DummyLogger(name, self.resource, self.processor) def force_flush(self, *args, **kwargs): pass class DummyMeterProvider(MeterProvider): pass class DummyLogger: def __init__(self, name, resource, processor): self.name = name self.resource = resource self.processor = processor def emit( self, record=None, *, timestamp=None, observed_timestamp=None, context=None, severity_number=None, severity_text=None, body=None, attributes=None, event_name=None, ): self.processor.emit(record) class DummyLogRecordProcessor: def __init__(self, exporter): self.exporter = exporter def emit(self, record): self.exporter.export([record]) def force_flush(self, time): pass def shutdown(self): pass class Processor: def __init__(self, exporter): self.exporter = exporter class DummyMetricReader(MetricReader): def __init__( self, exporter: MetricExporter, preferred_temporality: dict[type, AggregationTemporality] | None = None, preferred_aggregation: dict[type, Aggregation] | None = None, export_interval_millis: float | None = None, export_timeout_millis: float | None = None, ) -> None: super().__init__( 
preferred_temporality=preferred_temporality, preferred_aggregation=preferred_aggregation, ) self.exporter = exporter def _receive_metrics( self, metrics: Iterable[Metric], timeout_millis: float = 10_000, **kwargs, ) -> None: self.exporter.export(None) def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: return True # MetricReader that can be configured as a pull exporter class DummyMetricReaderPullExporter(MetricReader): def _receive_metrics( self, metrics: Iterable[Metric], timeout_millis: float = 10_000, **kwargs, ) -> None: pass def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: return True class DummyOTLPMetricExporter: def __init__(self, compression: str | None = None, *args, **kwargs): self.export_called = False self.compression = compression def export(self, batch): self.export_called = True def shutdown(self): pass class Exporter: def __init__(self): tracer_provider = trace.get_tracer_provider() self.service_name = ( tracer_provider.resource.attributes[SERVICE_NAME] if getattr(tracer_provider, "resource", None) else Resource.create().attributes.get(SERVICE_NAME) ) def shutdown(self): pass class OTLPSpanExporter: def __init__(self, compression: str | None = None, *args, **kwargs): self.compression = compression class DummyOTLPLogExporter(LogRecordExporter): def __init__(self, compression: str | None = None, *args, **kwargs): self.export_called = False self.compression = compression def export(self, batch): self.export_called = True def shutdown(self): pass class CustomSampler(Sampler): def __init__(self) -> None: pass def get_description(self) -> str: return "CustomSampler" def should_sample( self, parent_context: Optional["Context"], trace_id: int, name: str, kind: SpanKind = None, attributes: Attributes = None, links: Sequence[Link] = None, trace_state: TraceState = None, ) -> "SamplingResult": return SamplingResult( Decision.RECORD_AND_SAMPLE, None, None, ) class CustomRatioSampler(TraceIdRatioBased): def __init__(self, ratio): if not isinstance(ratio, float): raise ValueError( "CustomRatioSampler ratio argument is not a float." 
) self.ratio = ratio super().__init__(ratio) def get_description(self) -> str: return "CustomSampler" def should_sample( self, parent_context: "Context" | None, trace_id: int, name: str, kind: SpanKind | None = None, attributes: Attributes = None, links: Sequence[Link] | None = None, trace_state: TraceState | None = None, ) -> "SamplingResult": return SamplingResult( Decision.RECORD_AND_SAMPLE, None, None, ) class CustomSamplerFactory: @staticmethod def get_custom_sampler(unused_sampler_arg): return CustomSampler() @staticmethod def get_custom_ratio_sampler(sampler_arg): return CustomRatioSampler(float(sampler_arg)) @staticmethod def empty_get_custom_sampler(sampler_arg): return class CustomIdGenerator(IdGenerator): def generate_span_id(self): pass def generate_trace_id(self): pass class TestTraceInit(TestCase): def setUp(self): super() self.get_provider_patcher = patch( "opentelemetry.sdk._configuration.TracerProvider", Provider ) self.get_processor_patcher = patch( "opentelemetry.sdk._configuration.BatchSpanProcessor", Processor ) self.set_provider_patcher = patch( "opentelemetry.sdk._configuration.set_tracer_provider" ) self.get_provider_mock = self.get_provider_patcher.start() self.get_processor_mock = self.get_processor_patcher.start() self.set_provider_mock = self.set_provider_patcher.start() def tearDown(self): super() self.get_provider_patcher.stop() self.get_processor_patcher.stop() self.set_provider_patcher.stop() # pylint: disable=protected-access @patch.dict( environ, {"OTEL_RESOURCE_ATTRIBUTES": "service.name=my-test-service"} ) def test_trace_init_default(self): auto_resource = Resource.create( { "telemetry.auto.version": "test-version", } ) _init_tracing( {"zipkin": Exporter}, id_generator=RandomIdGenerator(), resource=auto_resource, ) self.assertEqual(self.set_provider_mock.call_count, 1) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider, Provider) self.assertIsInstance(provider.id_generator, RandomIdGenerator) self.assertIsInstance(provider.processor, Processor) self.assertIsInstance(provider.processor.exporter, Exporter) self.assertEqual( provider.processor.exporter.service_name, "my-test-service" ) self.assertEqual( provider.resource.attributes.get("telemetry.auto.version"), "test-version", ) @patch.dict( environ, {"OTEL_RESOURCE_ATTRIBUTES": "service.name=my-otlp-test-service"}, ) def test_trace_init_otlp(self): _init_tracing( {"otlp": OTLPSpanExporter}, id_generator=RandomIdGenerator() ) self.assertEqual(self.set_provider_mock.call_count, 1) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider, Provider) self.assertIsInstance(provider.id_generator, RandomIdGenerator) self.assertIsInstance(provider.processor, Processor) self.assertIsInstance(provider.processor.exporter, OTLPSpanExporter) self.assertIsInstance(provider.resource, Resource) self.assertEqual( provider.resource.attributes.get("service.name"), "my-otlp-test-service", ) def test_trace_init_exporter_uses_exporter_args_map(self): _init_tracing( {"otlp": OTLPSpanExporter}, id_generator=RandomIdGenerator(), exporter_args_map={ OTLPSpanExporter: {"compression": "gzip"}, DummyMetricReaderPullExporter: {"compression": "no"}, }, ) provider = self.set_provider_mock.call_args[0][0] exporter = provider.processor.exporter self.assertEqual(exporter.compression, "gzip") @patch.dict(environ, {OTEL_PYTHON_ID_GENERATOR: "custom_id_generator"}) @patch("opentelemetry.sdk._configuration.IdGenerator", new=IdGenerator) @patch("opentelemetry.sdk._configuration.entry_points") 
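    # entry_points is patched so the test behaves as if an installed package
    # had registered CustomIdGenerator under the "custom_id_generator" name
    # selected via OTEL_PYTHON_ID_GENERATOR above.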
def test_trace_init_custom_id_generator(self, mock_entry_points): mock_entry_points.configure_mock( return_value=[ IterEntryPoint("custom_id_generator", CustomIdGenerator) ] ) id_generator_name = _get_id_generator() id_generator = _import_id_generator(id_generator_name) _init_tracing({}, id_generator=id_generator) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider.id_generator, CustomIdGenerator) @patch.dict( "os.environ", {OTEL_TRACES_SAMPLER: "non_existent_entry_point"} ) def test_trace_init_custom_sampler_with_env_non_existent_entry_point(self): sampler_name = _get_sampler() with self.assertLogs(level=WARNING): sampler = _import_sampler(sampler_name) _init_tracing({}, sampler=sampler) provider = self.set_provider_mock.call_args[0][0] self.assertIsNone(provider.sampler) @patch("opentelemetry.sdk._configuration.entry_points") @patch.dict("os.environ", {OTEL_TRACES_SAMPLER: "custom_sampler_factory"}) def test_trace_init_custom_sampler_with_env(self, mock_entry_points): mock_entry_points.configure_mock( return_value=[ IterEntryPoint( "custom_sampler_factory", CustomSamplerFactory.get_custom_sampler, ) ] ) sampler_name = _get_sampler() sampler = _import_sampler(sampler_name) _init_tracing({}, sampler=sampler) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider.sampler, CustomSampler) @patch("opentelemetry.sdk._configuration.entry_points") @patch.dict("os.environ", {OTEL_TRACES_SAMPLER: "custom_sampler_factory"}) def test_trace_init_custom_sampler_with_env_bad_factory( self, mock_entry_points ): mock_entry_points.configure_mock( return_value=[ IterEntryPoint( "custom_sampler_factory", CustomSamplerFactory.empty_get_custom_sampler, ) ] ) sampler_name = _get_sampler() with self.assertLogs(level=WARNING): sampler = _import_sampler(sampler_name) _init_tracing({}, sampler=sampler) provider = self.set_provider_mock.call_args[0][0] self.assertIsNone(provider.sampler) @patch("opentelemetry.sdk._configuration.entry_points") @patch.dict( "os.environ", { OTEL_TRACES_SAMPLER: "custom_sampler_factory", OTEL_TRACES_SAMPLER_ARG: "0.5", }, ) def test_trace_init_custom_sampler_with_env_unused_arg( self, mock_entry_points ): mock_entry_points.configure_mock( return_value=[ IterEntryPoint( "custom_sampler_factory", CustomSamplerFactory.get_custom_sampler, ) ] ) sampler_name = _get_sampler() sampler = _import_sampler(sampler_name) _init_tracing({}, sampler=sampler) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider.sampler, CustomSampler) @patch("opentelemetry.sdk._configuration.entry_points") @patch.dict( "os.environ", { OTEL_TRACES_SAMPLER: "custom_ratio_sampler_factory", OTEL_TRACES_SAMPLER_ARG: "0.5", }, ) def test_trace_init_custom_ratio_sampler_with_env(self, mock_entry_points): mock_entry_points.configure_mock( return_value=[ IterEntryPoint( "custom_ratio_sampler_factory", CustomSamplerFactory.get_custom_ratio_sampler, ) ] ) sampler_name = _get_sampler() sampler = _import_sampler(sampler_name) _init_tracing({}, sampler=sampler) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider.sampler, CustomRatioSampler) self.assertEqual(provider.sampler.ratio, 0.5) @patch("opentelemetry.sdk._configuration.entry_points") @patch.dict( "os.environ", { OTEL_TRACES_SAMPLER: "custom_ratio_sampler_factory", OTEL_TRACES_SAMPLER_ARG: "foobar", }, ) def test_trace_init_custom_ratio_sampler_with_env_bad_arg( self, mock_entry_points ): mock_entry_points.configure_mock( return_value=[ IterEntryPoint( 
"custom_ratio_sampler_factory", CustomSamplerFactory.get_custom_ratio_sampler, ) ] ) sampler_name = _get_sampler() with self.assertLogs(level=WARNING): sampler = _import_sampler(sampler_name) _init_tracing({}, sampler=sampler) provider = self.set_provider_mock.call_args[0][0] self.assertIsNone(provider.sampler) @patch("opentelemetry.sdk._configuration.entry_points") @patch.dict( "os.environ", { OTEL_TRACES_SAMPLER: "custom_ratio_sampler_factory", }, ) def test_trace_init_custom_ratio_sampler_with_env_missing_arg( self, mock_entry_points ): mock_entry_points.configure_mock( return_value=[ IterEntryPoint( "custom_ratio_sampler_factory", CustomSamplerFactory.get_custom_ratio_sampler, ) ] ) sampler_name = _get_sampler() with self.assertLogs(level=WARNING): sampler = _import_sampler(sampler_name) _init_tracing({}, sampler=sampler) provider = self.set_provider_mock.call_args[0][0] self.assertIsNone(provider.sampler) @patch("opentelemetry.sdk._configuration.entry_points") @patch.dict( "os.environ", { OTEL_TRACES_SAMPLER: "custom_sampler_factory", OTEL_TRACES_SAMPLER_ARG: "0.5", }, ) def test_trace_init_custom_ratio_sampler_with_env_multiple_entry_points( self, mock_entry_points ): mock_entry_points.configure_mock( return_value=[ IterEntryPoint( "custom_sampler_factory", CustomSamplerFactory.get_custom_sampler, ), ] ) sampler_name = _get_sampler() sampler = _import_sampler(sampler_name) _init_tracing({}, sampler=sampler) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider.sampler, CustomSampler) def verify_default_sampler(self, tracer_provider): self.assertIsInstance(tracer_provider.sampler, ParentBased) # pylint: disable=protected-access self.assertEqual(tracer_provider.sampler._root, ALWAYS_ON) class TestLoggingInit(TestCase): def setUp(self): self.processor_patch = patch( "opentelemetry.sdk._configuration.BatchLogRecordProcessor", DummyLogRecordProcessor, ) self.provider_patch = patch( "opentelemetry.sdk._configuration.LoggerProvider", DummyLoggerProvider, ) self.set_provider_patch = patch( "opentelemetry.sdk._configuration.set_logger_provider" ) self.event_logger_provider_instance_mock = Mock() self.event_logger_provider_patch = patch( "opentelemetry.sdk._events.EventLoggerProvider", return_value=self.event_logger_provider_instance_mock, ) self.set_event_logger_provider_patch = patch( "opentelemetry._events.set_event_logger_provider" ) self.processor_mock = self.processor_patch.start() self.provider_mock = self.provider_patch.start() self.set_provider_mock = self.set_provider_patch.start() self.event_logger_provider_mock = ( self.event_logger_provider_patch.start() ) self.set_event_logger_provider_mock = ( self.set_event_logger_provider_patch.start() ) def tearDown(self): self.processor_patch.stop() self.set_provider_patch.stop() self.provider_patch.stop() self.event_logger_provider_patch.stop() self.set_event_logger_provider_patch.stop() root_logger = getLogger("root") root_logger.handlers = [ handler for handler in root_logger.handlers if not isinstance(handler, LoggingHandler) ] def test_logging_init_empty(self): with ResetGlobalLoggingState(): auto_resource = Resource.create( { "telemetry.auto.version": "auto-version", } ) _init_logging({}, resource=auto_resource) self.assertEqual(self.set_provider_mock.call_count, 1) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider, DummyLoggerProvider) self.assertIsInstance(provider.resource, Resource) self.assertEqual( provider.resource.attributes.get("telemetry.auto.version"), "auto-version", 
) self.event_logger_provider_mock.assert_called_once_with( logger_provider=provider ) self.set_event_logger_provider_mock.assert_called_once_with( self.event_logger_provider_instance_mock ) @patch.dict( environ, {"OTEL_RESOURCE_ATTRIBUTES": "service.name=otlp-service"}, ) def test_logging_init_exporter(self): with ResetGlobalLoggingState(): resource = Resource.create({}) _init_logging({"otlp": DummyOTLPLogExporter}, resource=resource) self.assertEqual(self.set_provider_mock.call_count, 1) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider, DummyLoggerProvider) self.assertIsInstance(provider.resource, Resource) self.assertEqual( provider.resource.attributes.get("service.name"), "otlp-service", ) self.assertIsInstance(provider.processor, DummyLogRecordProcessor) self.assertIsInstance( provider.processor.exporter, DummyOTLPLogExporter ) getLogger(__name__).error("hello") self.assertTrue(provider.processor.exporter.export_called) def test_logging_init_exporter_uses_exporter_args_map(self): with ResetGlobalLoggingState(): resource = Resource.create({}) _init_logging( {"otlp": DummyOTLPLogExporter}, resource=resource, exporter_args_map={ DummyOTLPLogExporter: {"compression": "gzip"}, DummyOTLPMetricExporter: {"compression": "no"}, }, ) self.assertEqual(self.set_provider_mock.call_count, 1) provider = self.set_provider_mock.call_args[0][0] self.assertEqual(provider.processor.exporter.compression, "gzip") @patch.dict( environ, {"OTEL_RESOURCE_ATTRIBUTES": "service.name=otlp-service"}, ) def test_logging_init_exporter_without_handler_setup(self): resource = Resource.create({}) _init_logging( {"otlp": DummyOTLPLogExporter}, resource=resource, setup_logging_handler=False, ) self.assertEqual(self.set_provider_mock.call_count, 1) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider, DummyLoggerProvider) self.assertIsInstance(provider.resource, Resource) self.assertEqual( provider.resource.attributes.get("service.name"), "otlp-service", ) self.assertIsInstance(provider.processor, DummyLogRecordProcessor) self.assertIsInstance( provider.processor.exporter, DummyOTLPLogExporter ) getLogger(__name__).error("hello") self.assertFalse(provider.processor.exporter.export_called) @patch.dict( environ, {"OTEL_RESOURCE_ATTRIBUTES": "service.name=otlp-service"}, ) @patch("opentelemetry.sdk._configuration._init_tracing") @patch("opentelemetry.sdk._configuration._init_logging") def test_logging_init_disable_default(self, logging_mock, tracing_mock): _initialize_components(auto_instrumentation_version="auto-version") self.assertEqual(tracing_mock.call_count, 1) logging_mock.assert_called_once_with( mock.ANY, mock.ANY, False, exporter_args_map=None ) @patch.dict( environ, { "OTEL_RESOURCE_ATTRIBUTES": "service.name=otlp-service", "OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED": "True", }, ) @patch("opentelemetry.sdk._configuration._init_tracing") @patch("opentelemetry.sdk._configuration._init_logging") def test_logging_init_enable_env(self, logging_mock, tracing_mock): with self.assertLogs(level=WARNING): _initialize_components(auto_instrumentation_version="auto-version") logging_mock.assert_called_once_with( mock.ANY, mock.ANY, True, exporter_args_map=None ) self.assertEqual(tracing_mock.call_count, 1) @patch.dict( environ, { "OTEL_RESOURCE_ATTRIBUTES": "service.name=otlp-service", "OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED": "True", }, ) @patch("opentelemetry.sdk._configuration._init_tracing") @patch("opentelemetry.sdk._configuration._init_logging") 
@patch("opentelemetry.sdk._configuration._init_metrics") def test_initialize_components_resource( self, metrics_mock, logging_mock, tracing_mock ): _initialize_components(auto_instrumentation_version="auto-version") self.assertEqual(logging_mock.call_count, 1) self.assertEqual(tracing_mock.call_count, 1) self.assertEqual(metrics_mock.call_count, 1) _, args, _ = logging_mock.mock_calls[0] logging_resource = args[1] _, _, kwargs = tracing_mock.mock_calls[0] tracing_resource = kwargs["resource"] _, args, _ = metrics_mock.mock_calls[0] metrics_resource = args[1] self.assertEqual(logging_resource, tracing_resource) self.assertEqual(logging_resource, metrics_resource) self.assertEqual(tracing_resource, metrics_resource) @patch.dict( environ, { "OTEL_TRACES_EXPORTER": _EXPORTER_OTLP, "OTEL_METRICS_EXPORTER": _EXPORTER_OTLP_PROTO_GRPC, "OTEL_LOGS_EXPORTER": _EXPORTER_OTLP_PROTO_HTTP, }, ) @patch.dict( environ, { "OTEL_RESOURCE_ATTRIBUTES": "service.name=otlp-service, custom.key.1=env-value", "OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED": "False", }, ) @patch("opentelemetry.sdk._configuration.Resource") @patch("opentelemetry.sdk._configuration._import_exporters") @patch("opentelemetry.sdk._configuration._get_exporter_names") @patch("opentelemetry.sdk._configuration._init_tracing") @patch("opentelemetry.sdk._configuration._init_logging") @patch("opentelemetry.sdk._configuration._init_metrics") def test_initialize_components_kwargs( self, metrics_mock, logging_mock, tracing_mock, exporter_names_mock, import_exporters_mock, resource_mock, ): exporter_names_mock.return_value = [ "env_var_exporter_1", "env_var_exporter_2", ] import_exporters_mock.return_value = ( "TEST_SPAN_EXPORTERS_DICT", "TEST_METRICS_EXPORTERS_DICT", "TEST_LOG_EXPORTERS_DICT", ) resource_mock.create.return_value = "TEST_RESOURCE" kwargs = { "auto_instrumentation_version": "auto-version", "trace_exporter_names": ["custom_span_exporter"], "metric_exporter_names": ["custom_metric_exporter"], "log_exporter_names": ["custom_log_exporter"], "sampler": "TEST_SAMPLER", "resource_attributes": { "custom.key.1": "pass-in-value-1", "custom.key.2": "pass-in-value-2", }, "id_generator": "TEST_GENERATOR", "setup_logging_handler": True, "exporter_args_map": {1: {"compression": "gzip"}}, } _initialize_components(**kwargs) import_exporters_mock.assert_called_once_with( [ "custom_span_exporter", "env_var_exporter_1", "env_var_exporter_2", ], [ "custom_metric_exporter", "env_var_exporter_1", "env_var_exporter_2", ], [ "custom_log_exporter", "env_var_exporter_1", "env_var_exporter_2", ], ) resource_mock.create.assert_called_once_with( { "telemetry.auto.version": "auto-version", "custom.key.1": "pass-in-value-1", "custom.key.2": "pass-in-value-2", } ) # Resource is checked separates tracing_mock.assert_called_once_with( exporters="TEST_SPAN_EXPORTERS_DICT", id_generator="TEST_GENERATOR", sampler="TEST_SAMPLER", resource="TEST_RESOURCE", exporter_args_map={1: {"compression": "gzip"}}, ) metrics_mock.assert_called_once_with( "TEST_METRICS_EXPORTERS_DICT", "TEST_RESOURCE", exporter_args_map={1: {"compression": "gzip"}}, ) logging_mock.assert_called_once_with( "TEST_LOG_EXPORTERS_DICT", "TEST_RESOURCE", True, exporter_args_map={1: {"compression": "gzip"}}, ) def test_basicConfig_works_with_otel_handler(self): with ResetGlobalLoggingState(): _init_logging( {"otlp": DummyOTLPLogExporter}, Resource.create({}), setup_logging_handler=True, ) logging.basicConfig(level=logging.INFO) root_logger = logging.getLogger() stream_handlers = [ h for h in 
root_logger.handlers if isinstance(h, logging.StreamHandler) ] self.assertEqual( len(stream_handlers), 1, "basicConfig should add a StreamHandler even when OTel handler exists", ) def test_basicConfig_preserves_otel_handler(self): with ResetGlobalLoggingState(): _init_logging( {"otlp": DummyOTLPLogExporter}, Resource.create({}), setup_logging_handler=True, ) root_logger = logging.getLogger() self.assertEqual( len(root_logger.handlers), 1, "Should be exactly one OpenTelemetry LoggingHandler", ) handler = root_logger.handlers[0] self.assertIsInstance(handler, LoggingHandler) logging.basicConfig() self.assertGreater(len(root_logger.handlers), 1) logging_handlers = [ h for h in root_logger.handlers if isinstance(h, LoggingHandler) ] self.assertEqual( len(logging_handlers), 1, "Should still have exactly one OpenTelemetry LoggingHandler", ) def test_dictConfig_preserves_otel_handler(self): with ResetGlobalLoggingState(): _init_logging( {"otlp": DummyOTLPLogExporter}, Resource.create({}), setup_logging_handler=True, ) root = logging.getLogger() self.assertEqual( len(root.handlers), 1, "Should be exactly one OpenTelemetry LoggingHandler", ) logging.config.dictConfig( { "version": 1, "disable_existing_loggers": False, # If this is True all loggers are disabled. Many unit tests assert loggers emit logs. "handlers": { "console": { "class": "logging.StreamHandler", "level": "DEBUG", "stream": "ext://sys.stdout", }, }, "loggers": { "": { # root logger "handlers": ["console"], }, }, } ) self.assertEqual(len(root.handlers), 2) logging_handlers = [ h for h in root.handlers if isinstance(h, LoggingHandler) ] self.assertEqual( len(logging_handlers), 1, "Should still have exactly one OpenTelemetry LoggingHandler", ) class TestMetricsInit(TestCase): def setUp(self): self.metric_reader_patch = patch( "opentelemetry.sdk._configuration.PeriodicExportingMetricReader", DummyMetricReader, ) self.provider_patch = patch( "opentelemetry.sdk._configuration.MeterProvider", DummyMeterProvider, ) self.set_provider_patch = patch( "opentelemetry.sdk._configuration.set_meter_provider" ) self.metric_reader_mock = self.metric_reader_patch.start() self.provider_mock = self.provider_patch.start() self.set_provider_mock = self.set_provider_patch.start() def tearDown(self): self.metric_reader_patch.stop() self.set_provider_patch.stop() self.provider_patch.stop() def test_metrics_init_empty(self): auto_resource = Resource.create( { "telemetry.auto.version": "auto-version", } ) _init_metrics({}, resource=auto_resource) self.assertEqual(self.set_provider_mock.call_count, 1) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider, DummyMeterProvider) self.assertIsInstance(provider._sdk_config.resource, Resource) self.assertEqual( provider._sdk_config.resource.attributes.get( "telemetry.auto.version" ), "auto-version", ) @patch.dict( environ, {"OTEL_RESOURCE_ATTRIBUTES": "service.name=otlp-service"}, ) def test_metrics_init_exporter(self): resource = Resource.create({}) _init_metrics({"otlp": DummyOTLPMetricExporter}, resource=resource) self.assertEqual(self.set_provider_mock.call_count, 1) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider, DummyMeterProvider) self.assertIsInstance(provider._sdk_config.resource, Resource) self.assertEqual( provider._sdk_config.resource.attributes.get("service.name"), "otlp-service", ) reader = provider._sdk_config.metric_readers[0] self.assertIsInstance(reader, DummyMetricReader) self.assertIsInstance(reader.exporter, DummyOTLPMetricExporter) def 
test_metrics_init_pull_exporter(self): resource = Resource.create({}) _init_metrics( {"dummy_metric_reader": DummyMetricReaderPullExporter}, resource=resource, ) self.assertEqual(self.set_provider_mock.call_count, 1) provider = self.set_provider_mock.call_args[0][0] self.assertIsInstance(provider, DummyMeterProvider) reader = provider._sdk_config.metric_readers[0] self.assertIsInstance(reader, DummyMetricReaderPullExporter) def test_metrics_init_exporter_uses_exporter_args_map(self): resource = Resource.create({}) _init_metrics( {"otlp": DummyOTLPMetricExporter}, resource=resource, exporter_args_map={ DummyOTLPMetricExporter: {"compression": "gzip"}, DummyMetricReaderPullExporter: {"compression": "no"}, }, ) provider = self.set_provider_mock.call_args[0][0] reader = provider._sdk_config.metric_readers[0] self.assertEqual(reader.exporter.compression, "gzip") class TestExporterNames(TestCase): @patch.dict( environ, { "OTEL_TRACES_EXPORTER": _EXPORTER_OTLP, "OTEL_METRICS_EXPORTER": _EXPORTER_OTLP_PROTO_GRPC, "OTEL_LOGS_EXPORTER": _EXPORTER_OTLP_PROTO_HTTP, }, ) def test_otlp_exporter(self): self.assertEqual( _get_exporter_names("traces"), [_EXPORTER_OTLP_PROTO_GRPC] ) self.assertEqual( _get_exporter_names("metrics"), [_EXPORTER_OTLP_PROTO_GRPC] ) self.assertEqual( _get_exporter_names("logs"), [_EXPORTER_OTLP_PROTO_HTTP] ) @patch.dict( environ, { "OTEL_TRACES_EXPORTER": _EXPORTER_OTLP, "OTEL_METRICS_EXPORTER": _EXPORTER_OTLP, "OTEL_EXPORTER_OTLP_PROTOCOL": "http/protobuf", "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL": "grpc", }, ) def test_otlp_custom_exporter(self): self.assertEqual( _get_exporter_names("traces"), [_EXPORTER_OTLP_PROTO_HTTP] ) self.assertEqual( _get_exporter_names("metrics"), [_EXPORTER_OTLP_PROTO_GRPC] ) @patch.dict( environ, { "OTEL_TRACES_EXPORTER": _EXPORTER_OTLP_PROTO_HTTP, "OTEL_METRICS_EXPORTER": _EXPORTER_OTLP_PROTO_GRPC, "OTEL_EXPORTER_OTLP_PROTOCOL": "grpc", "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL": "http/protobuf", }, ) def test_otlp_exporter_conflict(self): # Verify that OTEL_*_EXPORTER is used, and a warning is logged with self.assertLogs(level="WARNING") as logs_context: self.assertEqual( _get_exporter_names("traces"), [_EXPORTER_OTLP_PROTO_HTTP] ) assert len(logs_context.output) == 1 with self.assertLogs(level="WARNING") as logs_context: self.assertEqual( _get_exporter_names("metrics"), [_EXPORTER_OTLP_PROTO_GRPC] ) assert len(logs_context.output) == 1 @patch.dict(environ, {"OTEL_TRACES_EXPORTER": "zipkin"}) def test_multiple_exporters(self): self.assertEqual(sorted(_get_exporter_names("traces")), ["zipkin"]) @patch.dict(environ, {"OTEL_TRACES_EXPORTER": "none"}) def test_none_exporters(self): self.assertEqual(sorted(_get_exporter_names("traces")), []) def test_no_exporters(self): self.assertEqual(sorted(_get_exporter_names("traces")), []) @patch.dict(environ, {"OTEL_TRACES_EXPORTER": ""}) def test_empty_exporters(self): self.assertEqual(sorted(_get_exporter_names("traces")), []) class TestImportExporters(TestCase): def test_console_exporters(self): trace_exporters, metric_exporters, logs_exporters = _import_exporters( ["console"], ["console"], ["console"] ) self.assertEqual( trace_exporters["console"].__class__, ConsoleSpanExporter.__class__ ) self.assertEqual( logs_exporters["console"].__class__, ConsoleLogRecordExporter.__class__, ) self.assertEqual( metric_exporters["console"].__class__, ConsoleMetricExporter.__class__, ) @patch( "opentelemetry.sdk._configuration.entry_points", ) def test_metric_pull_exporter(self, mock_entry_points: Mock): def
mock_entry_points_impl(group, name): if name == "dummy_pull_exporter": return [ IterEntryPoint( name=name, class_type=DummyMetricReaderPullExporter ) ] return [] mock_entry_points.side_effect = mock_entry_points_impl _, metric_exporters, _ = _import_exporters( [], ["dummy_pull_exporter"], [] ) self.assertIs( metric_exporters["dummy_pull_exporter"], DummyMetricReaderPullExporter, ) class TestImportConfigComponents(TestCase): @patch( "opentelemetry.sdk._configuration.entry_points", **{"side_effect": KeyError}, ) def test__import_config_components_missing_entry_point( self, mock_entry_points ): with raises(RuntimeError) as error: _import_config_components(["a", "b", "c"], "name") self.assertEqual( str(error.value), "Requested entry point 'name' not found" ) @patch( "opentelemetry.sdk._configuration.entry_points", **{"side_effect": StopIteration}, ) def test__import_config_components_missing_component( self, mock_entry_points ): with raises(RuntimeError) as error: _import_config_components(["a", "b", "c"], "name") self.assertEqual( str(error.value), "Requested component 'a' not found in entry point 'name'", ) class TestConfigurator(TestCase): class CustomConfigurator(_OTelSDKConfigurator): def _configure(self, **kwargs): kwargs["sampler"] = "TEST_SAMPLER" super()._configure(**kwargs) @patch("opentelemetry.sdk._configuration._initialize_components") def test_custom_configurator(self, mock_init_comp): custom_configurator = TestConfigurator.CustomConfigurator() custom_configurator._configure( auto_instrumentation_version="TEST_VERSION2" ) kwargs = { "auto_instrumentation_version": "TEST_VERSION2", "sampler": "TEST_SAMPLER", } mock_init_comp.assert_called_once_with(**kwargs) # Any test that calls _init_logging with setup_logging_handler=True # should call _init_logging within this context manager, to # ensure the global logging state is reset after the test. 
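# A minimal usage sketch (illustrative only, mirroring the pattern the
# tests above follow):
#
#     with ResetGlobalLoggingState():
#         _init_logging(
#             {"otlp": DummyOTLPLogExporter},
#             Resource.create({}),
#             setup_logging_handler=True,
#         )
#         # ... assert against logging.getLogger() here ...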
class ResetGlobalLoggingState: def __init__(self): self.original_basic_config = logging.basicConfig self.original_dict_config = logging.config.dictConfig self.original_file_config = logging.config.fileConfig self.root_logger = getLogger() self.original_handlers = None def __enter__(self): self.original_handlers = self.root_logger.handlers[:] self.root_logger.handlers = [] return self def __exit__(self, exc_type, exc_val, exc_tb): self.root_logger.handlers = [] for handler in self.original_handlers: self.root_logger.addHandler(handler) logging.basicConfig = self.original_basic_config logging.config.dictConfig = self.original_dict_config logging.config.fileConfig = self.original_file_config class TestClearLoggingHandlers(TestCase): def test_preserves_handlers(self): root_logger = getLogger() initial_handlers = root_logger.handlers[:] test_handler = logging.StreamHandler() root_logger.addHandler(test_handler) expected_handlers = initial_handlers + [test_handler] with ResetGlobalLoggingState(): self.assertEqual(len(root_logger.handlers), 0) temp_handler = logging.StreamHandler() root_logger.addHandler(temp_handler) self.assertEqual(len(root_logger.handlers), len(expected_handlers)) for h1, h2 in zip(root_logger.handlers, expected_handlers): self.assertIs(h1, h2) root_logger.removeHandler(test_handler) def test_preserves_original_logging_fns(self): def f(x): print("f") with ResetGlobalLoggingState(): logging.basicConfig = f logging.config.dictConfig = f logging.config.fileConfig = f self.assertEqual(logging.config.dictConfig.__name__, "dictConfig") self.assertEqual(logging.basicConfig.__name__, "basicConfig") self.assertEqual(logging.config.fileConfig.__name__, "fileConfig") python-opentelemetry-1.39.1/opentelemetry-sdk/tests/test_util.py000066400000000000000000000107001511654350100252220ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
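# BoundedList is a fixed-capacity sequence: appending beyond `maxlen`
# evicts the oldest elements and increments the `dropped` counter
# (maxlen=None disables the limit). A minimal sketch of the behavior
# exercised by the tests below (assuming deque-style FIFO eviction, which
# test_append_drop verifies for append):
#
#     blist = BoundedList(2)
#     blist.extend([1, 2, 3])
#     assert list(blist) == [2, 3]
#     assert blist.dropped == 1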
import unittest from opentelemetry.sdk.util import BoundedList # pylint: disable=unsubscriptable-object class TestBoundedList(unittest.TestCase): base = [52, 36, 53, 29, 54, 99, 56, 48, 22, 35, 21, 65, 10, 95, 42, 60] def test_raises(self): """Test corner cases - negative list size - access out of range indexes """ with self.assertRaises(ValueError): BoundedList(-1) blist = BoundedList(4) blist.append(37) blist.append(13) with self.assertRaises(IndexError): _ = blist[2] with self.assertRaises(IndexError): _ = blist[4] with self.assertRaises(IndexError): _ = blist[-3] def test_from_seq(self): list_len = len(self.base) base_copy = list(self.base) blist = BoundedList.from_seq(list_len, base_copy) self.assertEqual(len(blist), list_len) # modify base_copy and test that blist is not changed for idx in range(list_len): base_copy[idx] = idx * base_copy[idx] for idx in range(list_len): self.assertEqual(blist[idx], self.base[idx]) # test that iter yields the correct number of elements self.assertEqual(len(tuple(blist)), list_len) # sequence too big blist = BoundedList.from_seq(list_len // 2, base_copy) self.assertEqual(len(blist), list_len // 2) self.assertEqual(blist.dropped, list_len - (list_len // 2)) def test_append_no_drop(self): """Append max capacity elements to the list without dropping elements.""" # create empty list list_len = len(self.base) blist = BoundedList(list_len) self.assertEqual(len(blist), 0) # fill list for item in self.base: blist.append(item) self.assertEqual(len(blist), list_len) self.assertEqual(blist.dropped, 0) for idx in range(list_len): self.assertEqual(blist[idx], self.base[idx]) # test __iter__ in BoundedList for idx, val in enumerate(blist): self.assertEqual(val, self.base[idx]) def test_append_drop(self): """Append more than max capacity elements and test that oldest ones are dropped.""" list_len = len(self.base) # create full BoundedList blist = BoundedList.from_seq(list_len, self.base) # try to append more items for val in self.base: # should drop the element without raising exceptions blist.append(2 * val) self.assertEqual(len(blist), list_len) self.assertEqual(blist.dropped, list_len) # test that new elements are in the list for idx in range(list_len): self.assertEqual(blist[idx], 2 * self.base[idx]) def test_extend_no_drop(self): # create empty list list_len = len(self.base) blist = BoundedList(list_len) self.assertEqual(len(blist), 0) # fill list blist.extend(self.base) self.assertEqual(len(blist), list_len) self.assertEqual(blist.dropped, 0) for idx in range(list_len): self.assertEqual(blist[idx], self.base[idx]) # test __iter__ in BoundedList for idx, val in enumerate(blist): self.assertEqual(val, self.base[idx]) def test_extend_drop(self): list_len = len(self.base) # create full BoundedList blist = BoundedList.from_seq(list_len, self.base) other_list = [13, 37, 51, 91] # try to extend with more elements blist.extend(other_list) self.assertEqual(len(blist), list_len) self.assertEqual(blist.dropped, len(other_list)) def test_no_limit(self): blist = BoundedList(maxlen=None) for num in range(100): blist.append(num) for num in range(100): self.assertEqual(blist[num], num) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/000077500000000000000000000000001511654350100237345ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/__init__.py000066400000000000000000000011101511654350100260360ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # 
you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/composite_sampler/000077500000000000000000000000001511654350100274615ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/composite_sampler/__init__.py000066400000000000000000000000001511654350100315600ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/composite_sampler/test_always_off.py000066400000000000000000000030521511654350100332240ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry.sdk.trace._sampling_experimental import ( composable_always_off, composite_sampler, ) from opentelemetry.sdk.trace.id_generator import RandomIdGenerator from opentelemetry.sdk.trace.sampling import Decision def test_description(): assert composable_always_off().get_description() == "ComposableAlwaysOff" def test_threshold(): assert ( composable_always_off() .sampling_intent(None, "test", None, {}, None, None) .threshold == -1 ) def test_sampling(): sampler = composite_sampler(composable_always_off()) num_sampled = 0 for _ in range(10000): res = sampler.should_sample( None, RandomIdGenerator().generate_trace_id(), "span", None, None, None, None, ) if res.decision == Decision.RECORD_AND_SAMPLE: num_sampled += 1 assert not res.trace_state assert num_sampled == 0 python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/composite_sampler/test_always_on.py000066400000000000000000000031571511654350100330740ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
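# Background for the threshold assertions in this module (as implied by the
# consistent-probability sampling scheme these experimental samplers
# implement): a 56-bit rejection threshold is compared against the span's
# 56-bit randomness value, and the span is kept when randomness >=
# threshold. A threshold of 0 therefore keeps every span and serializes as
# "th:0" in the "ot" trace-state entry, matching ComposableAlwaysOn below.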
from opentelemetry.sdk.trace._sampling_experimental import ( composable_always_on, composite_sampler, ) from opentelemetry.sdk.trace.id_generator import RandomIdGenerator from opentelemetry.sdk.trace.sampling import Decision def test_description(): assert composable_always_on().get_description() == "ComposableAlwaysOn" def test_threshold(): assert ( composable_always_on() .sampling_intent(None, "test", None, {}, None, None) .threshold == 0 ) def test_sampling(): sampler = composite_sampler(composable_always_on()) num_sampled = 0 for _ in range(10000): res = sampler.should_sample( None, RandomIdGenerator().generate_trace_id(), "span", None, None, None, None, ) if res.decision == Decision.RECORD_AND_SAMPLE: num_sampled += 1 assert res.trace_state is not None assert res.trace_state.get("ot", "") == "th:0" assert num_sampled == 10000 python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/composite_sampler/test_sampler.py000066400000000000000000000137361511654350100325470ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass import pytest from pytest import param as p from opentelemetry.sdk.trace._sampling_experimental import ( ComposableSampler, composable_always_off, composable_always_on, composable_parent_threshold, composable_traceid_ratio_based, composite_sampler, ) from opentelemetry.sdk.trace._sampling_experimental._trace_state import ( OtelTraceState, ) from opentelemetry.sdk.trace._sampling_experimental._util import ( INVALID_RANDOM_VALUE, INVALID_THRESHOLD, ) from opentelemetry.sdk.trace.sampling import Decision from opentelemetry.trace import ( NonRecordingSpan, SpanContext, TraceFlags, TraceState, set_span_in_context, ) TRACE_ID = int("00112233445566778800000000000000", 16) SPAN_ID = int("0123456789abcdef", 16) @dataclass class Input: sampler: ComposableSampler sampled: bool threshold: int | None random_value: int | None @dataclass class Output: sampled: bool threshold: int random_value: int @pytest.mark.parametrize( "input,output", ( p( Input( sampler=composable_always_on(), sampled=True, threshold=None, random_value=None, ), Output( sampled=True, threshold=0, random_value=INVALID_RANDOM_VALUE ), id="min threshold no parent random value", ), p( Input( sampler=composable_always_on(), sampled=True, threshold=None, random_value=0x7F99AA40C02744, ), Output(sampled=True, threshold=0, random_value=0x7F99AA40C02744), id="min threshold with parent random value", ), p( Input( sampler=composable_always_off(), sampled=True, threshold=None, random_value=None, ), Output( sampled=False, threshold=INVALID_THRESHOLD, random_value=INVALID_RANDOM_VALUE, ), id="max threshold", ), p( Input( sampler=composable_parent_threshold(composable_always_on()), sampled=False, threshold=0x7F99AA40C02744, random_value=0x7F99AA40C02744, ), Output( sampled=True, threshold=0x7F99AA40C02744, random_value=0x7F99AA40C02744, ), id="parent based in consistent mode", ), p( Input( 
sampler=composable_parent_threshold(composable_always_on()), sampled=True, threshold=None, random_value=None, ), Output( sampled=True, threshold=INVALID_THRESHOLD, random_value=INVALID_RANDOM_VALUE, ), id="parent based in legacy mode", ), p( Input( sampler=composable_traceid_ratio_based(0.5), sampled=True, threshold=None, random_value=0x7FFFFFFFFFFFFF, ), Output( sampled=False, threshold=INVALID_THRESHOLD, random_value=0x7FFFFFFFFFFFFF, ), id="half threshold not sampled", ), p( Input( sampler=composable_traceid_ratio_based(0.5), sampled=False, threshold=None, random_value=0x80000000000000, ), Output( sampled=True, threshold=0x80000000000000, random_value=0x80000000000000, ), id="half threshold sampled", ), p( Input( sampler=composable_traceid_ratio_based(1.0), sampled=False, threshold=0x80000000000000, random_value=0x80000000000000, ), Output(sampled=True, threshold=0, random_value=0x80000000000000), id="parent violating invariant", ), ), ) def test_sample(input: Input, output: Output): parent_state = OtelTraceState.invalid() if input.threshold is not None: parent_state.threshold = input.threshold if input.random_value is not None: parent_state.random_value = input.random_value parent_state_str = parent_state.serialize() parent_trace_state = ( TraceState((("ot", parent_state_str),)) if parent_state_str else None ) flags = ( TraceFlags(TraceFlags.SAMPLED) if input.sampled else TraceFlags.get_default() ) parent_span_context = SpanContext( TRACE_ID, SPAN_ID, False, flags, parent_trace_state ) parent_span = NonRecordingSpan(parent_span_context) parent_context = set_span_in_context(parent_span) result = composite_sampler(input.sampler).should_sample( parent_context, TRACE_ID, "name", trace_state=parent_trace_state ) decision = Decision.RECORD_AND_SAMPLE if output.sampled else Decision.DROP state = OtelTraceState.parse(result.trace_state) assert result.decision == decision assert state.threshold == output.threshold assert state.random_value == output.random_value python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/composite_sampler/test_traceid_ratio.py000066400000000000000000000046331511654350100337110ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
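# The expected thresholds in this module follow from the ratio-to-threshold
# mapping threshold ≈ (1 - ratio) * 2**56, truncated to an integer (so the
# exact values are subject to binary float rounding). Worked examples from
# the table below:
#
#     ratio 0.5  -> 0.5  * 2**56 = 36028797018963968 (0x80000000000000)
#     ratio 0.25 -> 0.75 * 2**56 = 54043195528445952
#     ratio 0.0  -> 1.0  * 2**56 = 72057594037927936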
import pytest from opentelemetry.sdk.trace._sampling_experimental import ( composable_traceid_ratio_based, composite_sampler, ) from opentelemetry.sdk.trace._sampling_experimental._trace_state import ( OtelTraceState, ) from opentelemetry.sdk.trace.id_generator import RandomIdGenerator from opentelemetry.sdk.trace.sampling import Decision @pytest.mark.parametrize( ("ratio", "threshold"), ( (1.0, "0"), (0.5, "8"), (0.25, "c"), (1e-300, "max"), (0, "max"), ), ) def test_description(ratio: float, threshold: str): assert ( composable_traceid_ratio_based(ratio).get_description() == f"ComposableTraceIDRatioBased{{threshold={threshold}, ratio={ratio}}}" ) @pytest.mark.parametrize( ("ratio", "threshold"), ( (1.0, 0), (0.5, 36028797018963968), (0.25, 54043195528445952), (0.125, 63050394783186944), (0.0, 72057594037927936), (0.45, 39631676720860364), (0.2, 57646075230342348), (0.13, 62690106812997304), (0.05, 68454714336031539), ), ) def test_sampling(ratio: float, threshold: int): sampler = composite_sampler(composable_traceid_ratio_based(ratio)) num_sampled = 0 for _ in range(10000): res = sampler.should_sample( None, RandomIdGenerator().generate_trace_id(), "span", None, None, None, None, ) if res.decision == Decision.RECORD_AND_SAMPLE: num_sampled += 1 assert res.trace_state is not None otts = OtelTraceState.parse(res.trace_state) assert otts.threshold == threshold assert otts.random_value == -1 expected_num_sampled = int(10000 * ratio) assert abs(num_sampled - expected_num_sampled) < 50 python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/composite_sampler/test_tracestate.py000066400000000000000000000045261511654350100332400ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
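# The marshal cases below exercise the "ot" trace-state entry as parsed and
# re-serialized by OtelTraceState: "rv" (explicit randomness) must be
# exactly 14 hex digits, "th" (threshold) allows 1-14 hex digits with
# trailing zeros trimmed on serialization (e.g. "th:10000000000000" ->
# "th:1"), and malformed fields are dropped from the round-tripped value.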
import pytest from opentelemetry.sdk.trace._sampling_experimental._trace_state import ( OtelTraceState, ) from opentelemetry.trace import TraceState @pytest.mark.parametrize( "input_str,output_str", ( ("a", "a"), ("#", "#"), ("rv:1234567890abcd", "rv:1234567890abcd"), ("rv:01020304050607", "rv:01020304050607"), ("rv:1234567890abcde", ""), ("th:1234567890abcd", "th:1234567890abcd"), ("th:1234567890abcd", "th:1234567890abcd"), ("th:10000000000000", "th:1"), ("th:1234500000000", "th:12345"), ("th:0", "th:0"), ("th:100000000000000", ""), ("th:1234567890abcde", ""), pytest.param( f"a:{'X' * 214};rv:1234567890abcd;th:1234567890abcd;x:3", f"th:1234567890abcd;rv:1234567890abcd;a:{'X' * 214};x:3", id="long", ), ("th:x", ""), ("th:100000000000000", ""), ("th:10000000000000", "th:1"), ("th:1000000000000", "th:1"), ("th:100000000000", "th:1"), ("th:10000000000", "th:1"), ("th:1000000000", "th:1"), ("th:100000000", "th:1"), ("th:10000000", "th:1"), ("th:1000000", "th:1"), ("th:100000", "th:1"), ("th:10000", "th:1"), ("th:1000", "th:1"), ("th:100", "th:1"), ("th:10", "th:1"), ("th:1", "th:1"), ("th:10000000000001", "th:10000000000001"), ("th:10000000000010", "th:1000000000001"), ("rv:x", ""), ("rv:100000000000000", ""), ("rv:10000000000000", "rv:10000000000000"), ("rv:1000000000000", ""), ), ) def test_marshal(input_str: str, output_str: str): state = OtelTraceState.parse(TraceState((("ot", input_str),))).serialize() assert state == output_str python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/export/000077500000000000000000000000001511654350100252555ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/export/__init__.py000066400000000000000000000011101511654350100273570ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/export/test_export.py000066400000000000000000000275471511654350100302260ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
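# BatchSpanProcessor tuning knobs exercised below, with the env var that
# overrides each and the default used when the env var is absent or
# invalid (per test_args_env_var_defaults / test_args_env_var_value_error):
#
#     OTEL_BSP_MAX_QUEUE_SIZE        -> max_queue_size        (default 2048)
#     OTEL_BSP_SCHEDULE_DELAY        -> schedule_delay_millis (default 5000)
#     OTEL_BSP_MAX_EXPORT_BATCH_SIZE -> max_export_batch_size (default 512)
#     OTEL_BSP_EXPORT_TIMEOUT        -> export_timeout_millis (default 30000)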
import os import threading import time import unittest from unittest import mock from opentelemetry import trace as trace_api from opentelemetry.context import Context from opentelemetry.sdk import trace from opentelemetry.sdk.environment_variables import ( OTEL_BSP_EXPORT_TIMEOUT, OTEL_BSP_MAX_EXPORT_BATCH_SIZE, OTEL_BSP_MAX_QUEUE_SIZE, OTEL_BSP_SCHEDULE_DELAY, ) from opentelemetry.sdk.trace import export from opentelemetry.sdk.trace.export import logger # pylint: disable=protected-access class MySpanExporter(export.SpanExporter): """Very simple span exporter used for testing.""" def __init__( self, destination, max_export_batch_size=None, export_timeout_millis=0.0, export_event: threading.Event = None, ): self.destination = destination self.max_export_batch_size = max_export_batch_size self.is_shutdown = False self.export_timeout = export_timeout_millis / 1e3 self.export_event = export_event def export(self, spans: trace.Span) -> export.SpanExportResult: if ( self.max_export_batch_size is not None and len(spans) > self.max_export_batch_size ): raise ValueError("Batch is too big") time.sleep(self.export_timeout) self.destination.extend(span.name for span in spans) if self.export_event: self.export_event.set() return export.SpanExportResult.SUCCESS def shutdown(self): self.is_shutdown = True class TestSimpleSpanProcessor(unittest.TestCase): def test_simple_span_processor(self): tracer_provider = trace.TracerProvider() tracer = tracer_provider.get_tracer(__name__) spans_names_list = [] my_exporter = MySpanExporter(destination=spans_names_list) span_processor = export.SimpleSpanProcessor(my_exporter) tracer_provider.add_span_processor(span_processor) with tracer.start_as_current_span("foo"): with tracer.start_as_current_span("bar"): with tracer.start_as_current_span("xxx"): pass self.assertListEqual(["xxx", "bar", "foo"], spans_names_list) span_processor.shutdown() self.assertTrue(my_exporter.is_shutdown) def test_simple_span_processor_no_context(self): """Check that we process spans that are never made active. SpanProcessors should act on a span's start and end events whether or not it is ever the active span. 
""" tracer_provider = trace.TracerProvider() tracer = tracer_provider.get_tracer(__name__) spans_names_list = [] my_exporter = MySpanExporter(destination=spans_names_list) span_processor = export.SimpleSpanProcessor(my_exporter) tracer_provider.add_span_processor(span_processor) with tracer.start_span("foo"): with tracer.start_span("bar"): with tracer.start_span("xxx"): pass self.assertListEqual(["xxx", "bar", "foo"], spans_names_list) def test_on_start_accepts_context(self): # pylint: disable=no-self-use tracer_provider = trace.TracerProvider() tracer = tracer_provider.get_tracer(__name__) exporter = MySpanExporter([]) span_processor = mock.Mock(wraps=export.SimpleSpanProcessor(exporter)) tracer_provider.add_span_processor(span_processor) context = Context() span = tracer.start_span("foo", context=context) span_processor.on_start.assert_called_once_with( span, parent_context=context ) def test_simple_span_processor_not_sampled(self): tracer_provider = trace.TracerProvider( sampler=trace.sampling.ALWAYS_OFF ) tracer = tracer_provider.get_tracer(__name__) spans_names_list = [] my_exporter = MySpanExporter(destination=spans_names_list) span_processor = export.SimpleSpanProcessor(my_exporter) tracer_provider.add_span_processor(span_processor) with tracer.start_as_current_span("foo"): with tracer.start_as_current_span("bar"): with tracer.start_as_current_span("xxx"): pass self.assertListEqual([], spans_names_list) # Many more test cases for the BatchSpanProcessor exist under # opentelemetry-sdk/tests/shared_internal/test_batch_processor.py. # Important: make sure to call .shutdown() on the BatchSpanProcessor # before the end of the test, otherwise the worker thread will continue # to run after the end of the test. class TestBatchSpanProcessor(unittest.TestCase): def test_get_span_exporter(self): exporter = MySpanExporter(destination=[]) batch_span_processor = export.BatchSpanProcessor(exporter) self.assertEqual(exporter, batch_span_processor.span_exporter) @mock.patch.dict( "os.environ", { OTEL_BSP_MAX_QUEUE_SIZE: "10", OTEL_BSP_SCHEDULE_DELAY: "2", OTEL_BSP_MAX_EXPORT_BATCH_SIZE: "3", OTEL_BSP_EXPORT_TIMEOUT: "4", }, ) def test_args_env_var(self): batch_span_processor = export.BatchSpanProcessor( MySpanExporter(destination=[]) ) self.assertEqual( batch_span_processor._batch_processor._max_queue_size, 10 ) self.assertEqual( batch_span_processor._batch_processor._schedule_delay_millis, 2 ) self.assertEqual( batch_span_processor._batch_processor._max_export_batch_size, 3 ) self.assertEqual( batch_span_processor._batch_processor._export_timeout_millis, 4 ) batch_span_processor.shutdown() def test_args_env_var_defaults(self): batch_span_processor = export.BatchSpanProcessor( MySpanExporter(destination=[]) ) self.assertEqual( batch_span_processor._batch_processor._max_queue_size, 2048 ) self.assertEqual( batch_span_processor._batch_processor._schedule_delay_millis, 5000 ) self.assertEqual( batch_span_processor._batch_processor._max_export_batch_size, 512 ) self.assertEqual( batch_span_processor._batch_processor._export_timeout_millis, 30000 ) batch_span_processor.shutdown() @mock.patch.dict( "os.environ", { OTEL_BSP_MAX_QUEUE_SIZE: "a", OTEL_BSP_SCHEDULE_DELAY: " ", OTEL_BSP_MAX_EXPORT_BATCH_SIZE: "One", OTEL_BSP_EXPORT_TIMEOUT: "@", }, ) def test_args_env_var_value_error(self): logger.disabled = True batch_span_processor = export.BatchSpanProcessor( MySpanExporter(destination=[]) ) logger.disabled = False self.assertEqual( batch_span_processor._batch_processor._max_queue_size, 2048 ) 
self.assertEqual( batch_span_processor._batch_processor._schedule_delay_millis, 5000 ) self.assertEqual( batch_span_processor._batch_processor._max_export_batch_size, 512 ) self.assertEqual( batch_span_processor._batch_processor._export_timeout_millis, 30000 ) batch_span_processor.shutdown() def test_on_start_accepts_parent_context(self): # pylint: disable=no-self-use my_exporter = MySpanExporter(destination=[]) span_processor = mock.Mock( wraps=export.BatchSpanProcessor(my_exporter) ) tracer_provider = trace.TracerProvider() tracer_provider.add_span_processor(span_processor) tracer = tracer_provider.get_tracer(__name__) context = Context() span = tracer.start_span("foo", context=context) span_processor.on_start.assert_called_once_with( span, parent_context=context ) def test_batch_span_processor_not_sampled(self): tracer_provider = trace.TracerProvider( sampler=trace.sampling.ALWAYS_OFF ) tracer = tracer_provider.get_tracer(__name__) spans_names_list = [] my_exporter = MySpanExporter( destination=spans_names_list, max_export_batch_size=128 ) span_processor = export.BatchSpanProcessor( my_exporter, max_queue_size=256, max_export_batch_size=64, schedule_delay_millis=100, ) tracer_provider.add_span_processor(span_processor) with tracer.start_as_current_span("foo"): pass time.sleep(0.05) # give some time for the exporter to upload spans span_processor.force_flush() self.assertEqual(len(spans_names_list), 0) span_processor.shutdown() def test_batch_span_processor_parameters(self): # zero max_queue_size self.assertRaises( ValueError, export.BatchSpanProcessor, None, max_queue_size=0 ) # negative max_queue_size self.assertRaises( ValueError, export.BatchSpanProcessor, None, max_queue_size=-500, ) # zero schedule_delay_millis self.assertRaises( ValueError, export.BatchSpanProcessor, None, schedule_delay_millis=0, ) # negative schedule_delay_millis self.assertRaises( ValueError, export.BatchSpanProcessor, None, schedule_delay_millis=-500, ) # zero max_export_batch_size self.assertRaises( ValueError, export.BatchSpanProcessor, None, max_export_batch_size=0, ) # negative max_export_batch_size self.assertRaises( ValueError, export.BatchSpanProcessor, None, max_export_batch_size=-500, ) # max_export_batch_size > max_queue_size: self.assertRaises( ValueError, export.BatchSpanProcessor, None, max_queue_size=256, max_export_batch_size=512, ) class TestConsoleSpanExporter(unittest.TestCase): def test_export(self): # pylint: disable=no-self-use """Check that the console exporter prints spans.""" exporter = export.ConsoleSpanExporter() # Mocking stdout interferes with debugging and test reporting, mock on # the exporter instance instead. 
span = trace._Span("span name", trace_api.INVALID_SPAN_CONTEXT) with mock.patch.object(exporter, "out") as mock_stdout: exporter.export([span]) mock_stdout.write.assert_called_once_with(span.to_json() + os.linesep) self.assertEqual(mock_stdout.write.call_count, 1) self.assertEqual(mock_stdout.flush.call_count, 1) def test_export_custom(self): # pylint: disable=no-self-use """Check that console exporter uses custom io, formatter.""" mock_span_str = mock.Mock(str) def formatter(span): # pylint: disable=unused-argument return mock_span_str mock_stdout = mock.Mock() exporter = export.ConsoleSpanExporter( out=mock_stdout, formatter=formatter ) exporter.export([trace._Span("span name", mock.Mock())]) mock_stdout.write.assert_called_once_with(mock_span_str) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/export/test_in_memory_span_exporter.py000066400000000000000000000052761511654350100336470ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from unittest import mock from opentelemetry import trace as trace_api from opentelemetry.sdk import trace from opentelemetry.sdk.trace import export from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( InMemorySpanExporter, ) class TestInMemorySpanExporter(unittest.TestCase): def setUp(self): self.tracer_provider = trace.TracerProvider() self.tracer = self.tracer_provider.get_tracer(__name__) self.memory_exporter = InMemorySpanExporter() span_processor = export.SimpleSpanProcessor(self.memory_exporter) self.tracer_provider.add_span_processor(span_processor) self.exec_scenario() def exec_scenario(self): with self.tracer.start_as_current_span("foo"): with self.tracer.start_as_current_span("bar"): with self.tracer.start_as_current_span("xxx"): pass def test_get_finished_spans(self): span_list = self.memory_exporter.get_finished_spans() spans_names_list = [span.name for span in span_list] self.assertListEqual(["xxx", "bar", "foo"], spans_names_list) def test_clear(self): self.memory_exporter.clear() span_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(span_list), 0) def test_shutdown(self): span_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(span_list), 3) self.memory_exporter.shutdown() # after shutdown no new spans are accepted self.exec_scenario() span_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(span_list), 3) def test_return_code(self): span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext)) span_list = (span,) memory_exporter = InMemorySpanExporter() ret = memory_exporter.export(span_list) self.assertEqual(ret, export.SpanExportResult.SUCCESS) memory_exporter.shutdown() # after shutdown export should fail ret = memory_exporter.export(span_list) self.assertEqual(ret, export.SpanExportResult.FAILURE) 
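# For reference, the wiring pattern these tests rely on, as a standalone
# sketch (only public SDK APIs; the span name "work" is illustrative):
#
#     provider = trace.TracerProvider()
#     exporter = InMemorySpanExporter()
#     provider.add_span_processor(export.SimpleSpanProcessor(exporter))
#     with provider.get_tracer(__name__).start_as_current_span("work"):
#         pass
#     assert [s.name for s in exporter.get_finished_spans()] == ["work"]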
python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/propagation/000077500000000000000000000000001511654350100262575ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/propagation/__init__.py000066400000000000000000000000001511654350100303560ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/test_globals.py000066400000000000000000000016031511654350100267700ustar00rootroot00000000000000# type:ignore import unittest from logging import WARNING from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider # type:ignore class TestGlobals(unittest.TestCase): def test_tracer_provider_override_warning(self): """trace.set_tracer_provider should throw a warning when overridden""" trace.set_tracer_provider(TracerProvider()) tracer_provider = trace.get_tracer_provider() with self.assertLogs(level=WARNING) as test: trace.set_tracer_provider(TracerProvider()) self.assertEqual( test.output, [ ( "WARNING:opentelemetry.trace:Overriding of current " "TracerProvider is not allowed" ) ], ) self.assertIs(tracer_provider, trace.get_tracer_provider()) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/test_implementation.py000066400000000000000000000037271511654350100304030ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from opentelemetry.sdk import trace from opentelemetry.trace import INVALID_SPAN, INVALID_SPAN_CONTEXT class TestTracerImplementation(unittest.TestCase): """ This test is in place to ensure the SDK implementation of the API is returning values that are valid. The same tests have been added to the API with different expected results. See issue for more details: https://github.com/open-telemetry/opentelemetry-python/issues/142 """ def test_tracer(self): tracer = trace.TracerProvider().get_tracer(__name__) with tracer.start_span("test") as span: self.assertNotEqual(span.get_span_context(), INVALID_SPAN_CONTEXT) self.assertNotEqual(span, INVALID_SPAN) self.assertIs(span.is_recording(), True) with tracer.start_span("test2") as span2: self.assertNotEqual( span2.get_span_context(), INVALID_SPAN_CONTEXT ) self.assertNotEqual(span2, INVALID_SPAN) self.assertIs(span2.is_recording(), True) def test_span(self): with self.assertRaises(Exception): # pylint: disable=no-value-for-parameter span = trace._Span() span = trace._Span("name", INVALID_SPAN_CONTEXT) self.assertEqual(span.get_span_context(), INVALID_SPAN_CONTEXT) self.assertIs(span.is_recording(), True) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/test_sampling.py000066400000000000000000000513521511654350100271650ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import sys import typing import unittest from opentelemetry import context as context_api from opentelemetry import trace from opentelemetry.sdk.trace import sampling TO_DEFAULT = trace.TraceFlags(trace.TraceFlags.DEFAULT) TO_SAMPLED = trace.TraceFlags(trace.TraceFlags.SAMPLED) class TestDecision(unittest.TestCase): def test_is_recording(self): self.assertTrue( sampling.Decision.is_recording(sampling.Decision.RECORD_ONLY) ) self.assertTrue( sampling.Decision.is_recording(sampling.Decision.RECORD_AND_SAMPLE) ) self.assertFalse( sampling.Decision.is_recording(sampling.Decision.DROP) ) def test_is_sampled(self): self.assertFalse( sampling.Decision.is_sampled(sampling.Decision.RECORD_ONLY) ) self.assertTrue( sampling.Decision.is_sampled(sampling.Decision.RECORD_AND_SAMPLE) ) self.assertFalse(sampling.Decision.is_sampled(sampling.Decision.DROP)) class TestSamplingResult(unittest.TestCase): def test_ctr(self): attributes = {"asd": "test"} trace_state = {} # pylint: disable=E1137 trace_state["test"] = "123" result = sampling.SamplingResult( sampling.Decision.RECORD_ONLY, attributes, trace_state ) self.assertIs(result.decision, sampling.Decision.RECORD_ONLY) with self.assertRaises(TypeError): result.attributes["test"] = "mess-this-up" self.assertEqual(len(result.attributes), 1) self.assertEqual(result.attributes["asd"], "test") self.assertEqual(result.trace_state["test"], "123") class TestSampler(unittest.TestCase): def _create_parent( self, trace_flags: trace.TraceFlags, is_remote=False, trace_state=None ) -> typing.Optional[context_api.Context]: if trace_flags is None: return None return trace.set_span_in_context( self._create_parent_span(trace_flags, is_remote, trace_state) ) @staticmethod def _create_parent_span( trace_flags: trace.TraceFlags, is_remote=False, trace_state=None ) -> trace.NonRecordingSpan: return trace.NonRecordingSpan( trace.SpanContext( 0xDEADBEEF, 0xDEADBEF0, is_remote=is_remote, trace_flags=trace_flags, trace_state=trace_state, ) ) def test_always_on(self): trace_state = trace.TraceState([("key", "value")]) test_data = (TO_DEFAULT, TO_SAMPLED, None) for trace_flags in test_data: with self.subTest(trace_flags=trace_flags): context = self._create_parent(trace_flags, False, trace_state) sample_result = sampling.ALWAYS_ON.should_sample( context, 0xDEADBEF1, "sampling on", trace.SpanKind.INTERNAL, attributes={"sampled.expect": "true"}, ) self.assertTrue(sample_result.decision.is_sampled()) self.assertEqual( sample_result.attributes, {"sampled.expect": "true"} ) if context is not None: self.assertEqual(sample_result.trace_state, trace_state) else: self.assertIsNone(sample_result.trace_state) def test_always_off(self): trace_state = trace.TraceState([("key", "value")]) test_data = (TO_DEFAULT, TO_SAMPLED, None) for trace_flags in test_data: with self.subTest(trace_flags=trace_flags): context = self._create_parent(trace_flags, False, trace_state) sample_result = sampling.ALWAYS_OFF.should_sample( context, 0xDEADBEF1, "sampling off", trace.SpanKind.INTERNAL, attributes={"sampled.expect": "false"}, ) self.assertFalse(sample_result.decision.is_sampled())
self.assertEqual(sample_result.attributes, {}) if context is not None: self.assertEqual(sample_result.trace_state, trace_state) else: self.assertIsNone(sample_result.trace_state) def test_default_on(self): trace_state = trace.TraceState([("key", "value")]) context = self._create_parent(TO_DEFAULT, False, trace_state) sample_result = sampling.DEFAULT_ON.should_sample( context, 0xDEADBEF1, "unsampled parent, sampling on", trace.SpanKind.INTERNAL, attributes={"sampled.expect": "false"}, ) self.assertFalse(sample_result.decision.is_sampled()) self.assertEqual(sample_result.attributes, {}) self.assertEqual(sample_result.trace_state, trace_state) context = self._create_parent(TO_SAMPLED, False, trace_state) sample_result = sampling.DEFAULT_ON.should_sample( context, 0xDEADBEF1, "sampled parent, sampling on", trace.SpanKind.INTERNAL, attributes={"sampled.expect": "true"}, ) self.assertTrue(sample_result.decision.is_sampled()) self.assertEqual(sample_result.attributes, {"sampled.expect": "true"}) self.assertEqual(sample_result.trace_state, trace_state) sample_result = sampling.DEFAULT_ON.should_sample( None, 0xDEADBEF1, "no parent, sampling on", trace.SpanKind.INTERNAL, attributes={"sampled.expect": "true"}, ) self.assertTrue(sample_result.decision.is_sampled()) self.assertEqual(sample_result.attributes, {"sampled.expect": "true"}) self.assertIsNone(sample_result.trace_state) def test_default_off(self): trace_state = trace.TraceState([("key", "value")]) context = self._create_parent(TO_DEFAULT, False, trace_state) sample_result = sampling.DEFAULT_OFF.should_sample( context, 0xDEADBEF1, "unsampled parent, sampling off", trace.SpanKind.INTERNAL, attributes={"sampled.expect": "false"}, ) self.assertFalse(sample_result.decision.is_sampled()) self.assertEqual(sample_result.attributes, {}) self.assertEqual(sample_result.trace_state, trace_state) context = self._create_parent(TO_SAMPLED, False, trace_state) sample_result = sampling.DEFAULT_OFF.should_sample( context, 0xDEADBEF1, "sampled parent, sampling on", trace.SpanKind.INTERNAL, attributes={"sampled.expect": "true"}, ) self.assertTrue(sample_result.decision.is_sampled()) self.assertEqual(sample_result.attributes, {"sampled.expect": "true"}) self.assertEqual(sample_result.trace_state, trace_state) default_off = sampling.DEFAULT_OFF.should_sample( None, 0xDEADBEF1, "unsampled parent, sampling off", trace.SpanKind.INTERNAL, attributes={"sampled.expect": "false"}, ) self.assertFalse(default_off.decision.is_sampled()) self.assertEqual(default_off.attributes, {}) self.assertIsNone(default_off.trace_state) def test_probability_sampler(self): sampler = sampling.TraceIdRatioBased(0.5) # Check that we sample based on the trace ID if the parent context is # null # trace_state should also be empty since it is based off of parent sampled_result = sampler.should_sample( None, 0x7FFFFFFFFFFFFFFF, "sampled true", trace.SpanKind.INTERNAL, attributes={"sampled.expect": "true"}, ) self.assertTrue(sampled_result.decision.is_sampled()) self.assertEqual(sampled_result.attributes, {"sampled.expect": "true"}) self.assertIsNone(sampled_result.trace_state) not_sampled_result = sampler.should_sample( None, 0x8000000000000000, "sampled false", trace.SpanKind.INTERNAL, attributes={"sampled.expect": "false"}, ) self.assertFalse(not_sampled_result.decision.is_sampled()) self.assertEqual(not_sampled_result.attributes, {}) self.assertIsNone(not_sampled_result.trace_state) def test_probability_sampler_zero(self): default_off = sampling.TraceIdRatioBased(0.0) self.assertFalse(
default_off.should_sample( None, 0x0, "span name" ).decision.is_sampled() ) def test_probability_sampler_one(self): default_off = sampling.TraceIdRatioBased(1.0) self.assertTrue( default_off.should_sample( None, 0xFFFFFFFFFFFFFFFF, "span name" ).decision.is_sampled() ) def test_probability_sampler_limits(self): # Sample one of every 2^64 (= 5e-20) traces. This is the lowest # possible meaningful sampling rate, only traces with trace ID 0x0 # should get sampled. almost_always_off = sampling.TraceIdRatioBased(2**-64) self.assertTrue( almost_always_off.should_sample( None, 0x0, "span name" ).decision.is_sampled() ) self.assertFalse( almost_always_off.should_sample( None, 0x1, "span name" ).decision.is_sampled() ) self.assertEqual( sampling.TraceIdRatioBased.get_bound_for_rate(2**-64), 0x1 ) # Sample every trace with trace ID less than 0xffffffffffffffff. In # principle this is the highest possible sampling rate less than 1, but # we can't actually express this rate as a float! # # In practice, the highest possible sampling rate is: # # 1 - sys.float_info.epsilon almost_always_on = sampling.TraceIdRatioBased(1 - 2**-64) self.assertTrue( almost_always_on.should_sample( None, 0xFFFFFFFFFFFFFFFE, "span name" ).decision.is_sampled() ) # These tests are logically consistent, but fail because of the float # precision issue above. Changing the sampler to check fewer bytes of # the trace ID will cause these to pass. # self.assertFalse( # almost_always_on.should_sample( # None, # 0xFFFFFFFFFFFFFFFF, # "span name", # ).decision.is_sampled() # ) # self.assertEqual( # sampling.TraceIdRatioBased.get_bound_for_rate(1 - 2 ** -64)), # 0xFFFFFFFFFFFFFFFF, # ) # Check that a sampler with the highest effective sampling rate < 1 # refuses to sample traces with trace ID 0xffffffffffffffff. almost_almost_always_on = sampling.TraceIdRatioBased( 1 - sys.float_info.epsilon ) self.assertFalse( almost_almost_always_on.should_sample( None, 0xFFFFFFFFFFFFFFFF, "span name" ).decision.is_sampled() ) # Check that the highest effective sampling rate is actually lower than # the highest theoretical sampling rate. If this test fails the test # above is wrong. 
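

# --- Editor's illustrative sketch (not part of the original test module).
# Spells out the bound arithmetic the limit tests above rely on: a
# TraceIdRatioBased sampler keeps a span when the low 64 bits of its trace
# ID fall strictly below rate * 2**64.
def _demo_trace_id_ratio_bound():
    from opentelemetry.sdk.trace import sampling

    half = sampling.TraceIdRatioBased(0.5)
    assert sampling.TraceIdRatioBased.get_bound_for_rate(0.5) == (
        0x8000000000000000
    )
    # 2**63 - 1 is below the bound, 2**63 is not.
    assert half.should_sample(
        None, 0x7FFFFFFFFFFFFFFF, "in"
    ).decision.is_sampled()
    assert not half.should_sample(
        None, 0x8000000000000000, "out"
    ).decision.is_sampled()

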
self.assertLess( almost_almost_always_on.bound, 0xFFFFFFFFFFFFFFFF, ) # pylint:disable=too-many-statements def exec_parent_based(self, parent_sampling_context): trace_state = trace.TraceState([("key", "value")]) sampler = sampling.ParentBased(sampling.ALWAYS_ON) # Check that the sampling decision matches the parent context if given with parent_sampling_context( self._create_parent_span( trace_flags=TO_DEFAULT, trace_state=trace_state, ) ) as context: # local, not sampled not_sampled_result = sampler.should_sample( context, 0x7FFFFFFFFFFFFFFF, "unsampled parent, sampling on", trace.SpanKind.INTERNAL, attributes={"sampled": "false"}, ) self.assertFalse(not_sampled_result.decision.is_sampled()) self.assertEqual(not_sampled_result.attributes, {}) self.assertEqual(not_sampled_result.trace_state, trace_state) with parent_sampling_context( self._create_parent_span( trace_flags=TO_DEFAULT, trace_state=trace_state, ) ) as context: sampler = sampling.ParentBased( root=sampling.ALWAYS_OFF, local_parent_not_sampled=sampling.ALWAYS_ON, ) # local, not sampled -> opposite sampler sampled_result = sampler.should_sample( context, 0x7FFFFFFFFFFFFFFF, "unsampled parent, sampling on", trace.SpanKind.INTERNAL, attributes={"sampled": "false"}, ) self.assertTrue(sampled_result.decision.is_sampled()) self.assertEqual(sampled_result.attributes, {"sampled": "false"}) self.assertEqual(sampled_result.trace_state, trace_state) with parent_sampling_context( self._create_parent_span( trace_flags=TO_SAMPLED, trace_state=trace_state, ) ) as context: sampler = sampling.ParentBased(sampling.ALWAYS_OFF) # local, sampled sampled_result = sampler.should_sample( context, 0x8000000000000000, "sampled parent, sampling off", trace.SpanKind.INTERNAL, attributes={"sampled": "true"}, trace_state=trace_state, ) self.assertTrue(sampled_result.decision.is_sampled()) self.assertEqual(sampled_result.attributes, {"sampled": "true"}) self.assertEqual(sampled_result.trace_state, trace_state) with parent_sampling_context( self._create_parent_span( trace_flags=TO_SAMPLED, trace_state=trace_state, ) ) as context: sampler = sampling.ParentBased( root=sampling.ALWAYS_ON, local_parent_sampled=sampling.ALWAYS_OFF, ) # local, sampled -> opposite sampler not_sampled_result = sampler.should_sample( context, 0x7FFFFFFFFFFFFFFF, "unsampled parent, sampling on", trace.SpanKind.INTERNAL, attributes={"sampled": "false"}, trace_state=trace_state, ) self.assertFalse(not_sampled_result.decision.is_sampled()) self.assertEqual(not_sampled_result.attributes, {}) self.assertEqual(not_sampled_result.trace_state, trace_state) with parent_sampling_context( self._create_parent_span( trace_flags=TO_DEFAULT, is_remote=True, trace_state=trace_state, ) ) as context: sampler = sampling.ParentBased(sampling.ALWAYS_ON) # remote, not sampled not_sampled_result = sampler.should_sample( context, 0x7FFFFFFFFFFFFFFF, "unsampled parent, sampling on", trace.SpanKind.INTERNAL, attributes={"sampled": "false"}, trace_state=trace_state, ) self.assertFalse(not_sampled_result.decision.is_sampled()) self.assertEqual(not_sampled_result.attributes, {}) self.assertEqual(not_sampled_result.trace_state, trace_state) with parent_sampling_context( self._create_parent_span( trace_flags=TO_DEFAULT, is_remote=True, trace_state=trace_state, ) ) as context: sampler = sampling.ParentBased( root=sampling.ALWAYS_OFF, remote_parent_not_sampled=sampling.ALWAYS_ON, ) # remote, not sampled -> opposite sampler sampled_result = sampler.should_sample( context, 0x7FFFFFFFFFFFFFFF, "unsampled parent, sampling 
on", trace.SpanKind.INTERNAL, attributes={"sampled": "false"}, ) self.assertTrue(sampled_result.decision.is_sampled()) self.assertEqual(sampled_result.attributes, {"sampled": "false"}) self.assertEqual(sampled_result.trace_state, trace_state) with parent_sampling_context( self._create_parent_span( trace_flags=TO_SAMPLED, is_remote=True, trace_state=trace_state, ) ) as context: sampler = sampling.ParentBased(sampling.ALWAYS_OFF) # remote, sampled sampled_result = sampler.should_sample( context, 0x8000000000000000, "sampled parent, sampling off", trace.SpanKind.INTERNAL, attributes={"sampled": "true"}, ) self.assertTrue(sampled_result.decision.is_sampled()) self.assertEqual(sampled_result.attributes, {"sampled": "true"}) self.assertEqual(sampled_result.trace_state, trace_state) with parent_sampling_context( self._create_parent_span( trace_flags=TO_SAMPLED, is_remote=True, trace_state=trace_state, ) ) as context: sampler = sampling.ParentBased( root=sampling.ALWAYS_ON, remote_parent_sampled=sampling.ALWAYS_OFF, ) # remote, sampled -> opposite sampler not_sampled_result = sampler.should_sample( context, 0x7FFFFFFFFFFFFFFF, "unsampled parent, sampling on", trace.SpanKind.INTERNAL, attributes={"sampled": "false"}, ) self.assertFalse(not_sampled_result.decision.is_sampled()) self.assertEqual(not_sampled_result.attributes, {}) self.assertEqual(not_sampled_result.trace_state, trace_state) # for root span follow decision of root sampler with parent_sampling_context(trace.INVALID_SPAN) as context: sampler = sampling.ParentBased(sampling.ALWAYS_OFF) not_sampled_result = sampler.should_sample( context, 0x8000000000000000, "parent, sampling off", trace.SpanKind.INTERNAL, attributes={"sampled": "false"}, ) self.assertFalse(not_sampled_result.decision.is_sampled()) self.assertEqual(not_sampled_result.attributes, {}) self.assertIsNone(not_sampled_result.trace_state) with parent_sampling_context(trace.INVALID_SPAN) as context: sampler = sampling.ParentBased(sampling.ALWAYS_ON) sampled_result = sampler.should_sample( context, 0x8000000000000000, "no parent, sampling on", trace.SpanKind.INTERNAL, attributes={"sampled": "true"}, trace_state=trace_state, ) self.assertTrue(sampled_result.decision.is_sampled()) self.assertEqual(sampled_result.attributes, {"sampled": "true"}) self.assertIsNone(sampled_result.trace_state) def test_parent_based_explicit_parent_context(self): @contextlib.contextmanager def explicit_parent_context(span: trace.Span): yield trace.set_span_in_context(span) self.exec_parent_based(explicit_parent_context) def test_parent_based_implicit_parent_context(self): @contextlib.contextmanager def implicit_parent_context(span: trace.Span): token = context_api.attach(trace.set_span_in_context(span)) yield None context_api.detach(token) self.exec_parent_based(implicit_parent_context) python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/test_span_processor.py000066400000000000000000000262311511654350100304110ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import abc import time import typing import unittest from platform import python_implementation, system from threading import Event from typing import Optional from unittest import mock from pytest import mark from opentelemetry import trace as trace_api from opentelemetry.context import Context from opentelemetry.sdk import trace def span_event_start_fmt(span_processor_name, span_name): return span_processor_name + ":" + span_name + ":start" def span_event_end_fmt(span_processor_name, span_name): return span_processor_name + ":" + span_name + ":end" class MySpanProcessor(trace.SpanProcessor): def __init__(self, name, span_list): self.name = name self.span_list = span_list def on_start( self, span: "trace.Span", parent_context: Optional[Context] = None ) -> None: self.span_list.append(span_event_start_fmt(self.name, span.name)) def on_end(self, span: "trace.Span") -> None: self.span_list.append(span_event_end_fmt(self.name, span.name)) class TestSpanProcessor(unittest.TestCase): def test_span_processor(self): tracer_provider = trace.TracerProvider() tracer = tracer_provider.get_tracer(__name__) spans_calls_list = [] # filled by MySpanProcessor expected_list = [] # filled by hand # Span processors are created but not added to the tracer yet sp1 = MySpanProcessor("SP1", spans_calls_list) sp2 = MySpanProcessor("SP2", spans_calls_list) with tracer.start_as_current_span("foo"): with tracer.start_as_current_span("bar"): with tracer.start_as_current_span("baz"): pass # at this point lists must be empty self.assertEqual(len(spans_calls_list), 0) # add single span processor tracer_provider.add_span_processor(sp1) with tracer.start_as_current_span("foo"): expected_list.append(span_event_start_fmt("SP1", "foo")) with tracer.start_as_current_span("bar"): expected_list.append(span_event_start_fmt("SP1", "bar")) with tracer.start_as_current_span("baz"): expected_list.append(span_event_start_fmt("SP1", "baz")) expected_list.append(span_event_end_fmt("SP1", "baz")) expected_list.append(span_event_end_fmt("SP1", "bar")) expected_list.append(span_event_end_fmt("SP1", "foo")) self.assertListEqual(spans_calls_list, expected_list) spans_calls_list.clear() expected_list.clear() # go for multiple span processors tracer_provider.add_span_processor(sp2) with tracer.start_as_current_span("foo"): expected_list.append(span_event_start_fmt("SP1", "foo")) expected_list.append(span_event_start_fmt("SP2", "foo")) with tracer.start_as_current_span("bar"): expected_list.append(span_event_start_fmt("SP1", "bar")) expected_list.append(span_event_start_fmt("SP2", "bar")) with tracer.start_as_current_span("baz"): expected_list.append(span_event_start_fmt("SP1", "baz")) expected_list.append(span_event_start_fmt("SP2", "baz")) expected_list.append(span_event_end_fmt("SP1", "baz")) expected_list.append(span_event_end_fmt("SP2", "baz")) expected_list.append(span_event_end_fmt("SP1", "bar")) expected_list.append(span_event_end_fmt("SP2", "bar")) expected_list.append(span_event_end_fmt("SP1", "foo")) expected_list.append(span_event_end_fmt("SP2", "foo")) # compare if two lists are the same self.assertListEqual(spans_calls_list, expected_list) def test_add_span_processor_after_span_creation(self): tracer_provider = trace.TracerProvider() tracer = tracer_provider.get_tracer(__name__) spans_calls_list = [] # filled by MySpanProcessor expected_list = [] # filled by hand # Span processors are created but not added to the tracer yet sp = 
MySpanProcessor("SP1", spans_calls_list) with tracer.start_as_current_span("foo"): with tracer.start_as_current_span("bar"): with tracer.start_as_current_span("baz"): # add span processor after spans have been created tracer_provider.add_span_processor(sp) expected_list.append(span_event_end_fmt("SP1", "baz")) expected_list.append(span_event_end_fmt("SP1", "bar")) expected_list.append(span_event_end_fmt("SP1", "foo")) self.assertListEqual(spans_calls_list, expected_list) class MultiSpanProcessorTestBase(abc.ABC): @abc.abstractmethod def create_multi_span_processor( self, ) -> typing.Union[ trace.SynchronousMultiSpanProcessor, trace.ConcurrentMultiSpanProcessor ]: pass @staticmethod def create_default_span() -> trace_api.Span: span_context = trace_api.SpanContext(37, 73, is_remote=False) return trace_api.NonRecordingSpan(span_context) def test_on_start(self): multi_processor = self.create_multi_span_processor() mocks = [mock.Mock(spec=trace.SpanProcessor) for _ in range(0, 5)] for mock_processor in mocks: multi_processor.add_span_processor(mock_processor) span = self.create_default_span() context = Context() multi_processor.on_start(span, parent_context=context) for mock_processor in mocks: mock_processor.on_start.assert_called_once_with( span, parent_context=context ) multi_processor.shutdown() def test_on_end(self): multi_processor = self.create_multi_span_processor() mocks = [mock.Mock(spec=trace.SpanProcessor) for _ in range(0, 5)] for mock_processor in mocks: multi_processor.add_span_processor(mock_processor) span = self.create_default_span() multi_processor.on_end(span) for mock_processor in mocks: mock_processor.on_end.assert_called_once_with(span) multi_processor.shutdown() def test_on_shutdown(self): multi_processor = self.create_multi_span_processor() mocks = [mock.Mock(spec=trace.SpanProcessor) for _ in range(0, 5)] for mock_processor in mocks: multi_processor.add_span_processor(mock_processor) multi_processor.shutdown() for mock_processor in mocks: mock_processor.shutdown.assert_called_once_with() def test_force_flush(self): multi_processor = self.create_multi_span_processor() mocks = [mock.Mock(spec=trace.SpanProcessor) for _ in range(0, 5)] for mock_processor in mocks: multi_processor.add_span_processor(mock_processor) timeout_millis = 100 flushed = multi_processor.force_flush(timeout_millis) # pylint: disable=no-member self.assertTrue(flushed) for mock_processor in mocks: # pylint: disable=no-member self.assertEqual(1, mock_processor.force_flush.call_count) multi_processor.shutdown() class TestSynchronousMultiSpanProcessor( MultiSpanProcessorTestBase, unittest.TestCase ): def create_multi_span_processor( self, ) -> trace.SynchronousMultiSpanProcessor: return trace.SynchronousMultiSpanProcessor() def test_force_flush_late_by_timeout(self): multi_processor = trace.SynchronousMultiSpanProcessor() def delayed_flush(_): time.sleep(0.055) mock_processor1 = mock.Mock(spec=trace.SpanProcessor) mock_processor1.force_flush = mock.Mock(side_effect=delayed_flush) multi_processor.add_span_processor(mock_processor1) mock_processor2 = mock.Mock(spec=trace.SpanProcessor) multi_processor.add_span_processor(mock_processor2) flushed = multi_processor.force_flush(50) self.assertFalse(flushed) self.assertEqual(1, mock_processor1.force_flush.call_count) self.assertEqual(0, mock_processor2.force_flush.call_count) def test_force_flush_late_by_span_processor(self): multi_processor = trace.SynchronousMultiSpanProcessor() mock_processor1 = mock.Mock(spec=trace.SpanProcessor) mock_processor1.force_flush 
= mock.Mock(return_value=False) multi_processor.add_span_processor(mock_processor1) mock_processor2 = mock.Mock(spec=trace.SpanProcessor) multi_processor.add_span_processor(mock_processor2) flushed = multi_processor.force_flush(50) self.assertFalse(flushed) self.assertEqual(1, mock_processor1.force_flush.call_count) self.assertEqual(0, mock_processor2.force_flush.call_count) class TestConcurrentMultiSpanProcessor( MultiSpanProcessorTestBase, unittest.TestCase ): def create_multi_span_processor( self, ) -> trace.ConcurrentMultiSpanProcessor: return trace.ConcurrentMultiSpanProcessor(3) @mark.skipif( python_implementation() == "PyPy" and system() == "Windows", reason="This test randomly fails in Windows with PyPy", ) def test_force_flush_late_by_timeout(self): multi_processor = trace.ConcurrentMultiSpanProcessor(5) wait_event = Event() def delayed_flush(_): wait_event.wait() late_mock = mock.Mock(spec=trace.SpanProcessor) late_mock.force_flush = mock.Mock(side_effect=delayed_flush) mocks = [mock.Mock(spec=trace.SpanProcessor) for _ in range(0, 4)] mocks.insert(0, late_mock) for mock_processor in mocks: multi_processor.add_span_processor(mock_processor) flushed = multi_processor.force_flush(timeout_millis=10) # let the thread executing the late_mock continue wait_event.set() self.assertFalse(flushed) for mock_processor in mocks: self.assertEqual(1, mock_processor.force_flush.call_count) multi_processor.shutdown() def test_force_flush_late_by_span_processor(self): multi_processor = trace.ConcurrentMultiSpanProcessor(5) late_mock = mock.Mock(spec=trace.SpanProcessor) late_mock.force_flush = mock.Mock(return_value=False) mocks = [mock.Mock(spec=trace.SpanProcessor) for _ in range(0, 4)] mocks.insert(0, late_mock) for mock_processor in mocks: multi_processor.add_span_processor(mock_processor) flushed = multi_processor.force_flush() self.assertFalse(flushed) for mock_processor in mocks: self.assertEqual(1, mock_processor.force_flush.call_count) multi_processor.shutdown() python-opentelemetry-1.39.1/opentelemetry-sdk/tests/trace/test_trace.py000066400000000000000000002373611511654350100264570ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
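

# --- Editor's illustrative sketch (not part of the original test modules).
# Shows the custom-processor pattern the span-processor tests above exercise:
# subclass SpanProcessor, register it on a provider, and every span started
# from that provider's tracers flows through on_start/on_end. The class and
# provider names are arbitrary demo values.
def _demo_span_processor_fanout():
    from opentelemetry.sdk import trace as sdk_trace

    class _PrintingProcessor(sdk_trace.SpanProcessor):
        # Mirrors MySpanProcessor above: observe span lifecycle events.
        def on_start(self, span, parent_context=None):
            print("start:", span.name)

        def on_end(self, span):
            print("end:", span.name)

    provider = sdk_trace.TracerProvider()
    provider.add_span_processor(_PrintingProcessor())
    with provider.get_tracer("processor-demo").start_as_current_span("work"):
        pass  # prints "start: work" and, on exit, "end: work"

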
# pylint: disable=too-many-lines # pylint: disable=no-member import shutil import subprocess import unittest from importlib import reload from logging import ERROR, WARNING from random import randint from time import time_ns from typing import Optional from unittest import mock from unittest.mock import Mock, patch from opentelemetry import trace as trace_api from opentelemetry.attributes import BoundedAttributes from opentelemetry.context import Context from opentelemetry.sdk import resources, trace from opentelemetry.sdk.environment_variables import ( OTEL_ATTRIBUTE_COUNT_LIMIT, OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT, OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, OTEL_SDK_DISABLED, OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT, OTEL_SPAN_EVENT_COUNT_LIMIT, OTEL_SPAN_LINK_COUNT_LIMIT, OTEL_TRACES_SAMPLER, OTEL_TRACES_SAMPLER_ARG, ) from opentelemetry.sdk.trace import Resource, TracerProvider from opentelemetry.sdk.trace.id_generator import RandomIdGenerator from opentelemetry.sdk.trace.sampling import ( ALWAYS_OFF, ALWAYS_ON, Decision, ParentBased, StaticSampler, ) from opentelemetry.sdk.util import BoundedDict, ns_to_iso_str from opentelemetry.sdk.util.instrumentation import InstrumentationInfo from opentelemetry.test.spantestutil import ( get_span_with_dropped_attributes_events_links, new_tracer, ) from opentelemetry.trace import ( Status, StatusCode, get_tracer, set_tracer_provider, ) class TestTracer(unittest.TestCase): def test_no_deprecated_warning(self): with self.assertRaises(AssertionError): with self.assertWarns(DeprecationWarning): TracerProvider(Mock(), Mock()).get_tracer(Mock(), Mock()) # This is being added here to make sure the filter on # InstrumentationInfo does not affect other DeprecationWarnings that # may be raised. with self.assertWarns(DeprecationWarning): BoundedDict(0) def test_extends_api(self): tracer = new_tracer() self.assertIsInstance(tracer, trace.Tracer) self.assertIsInstance(tracer, trace_api.Tracer) def test_shutdown(self): tracer_provider = trace.TracerProvider() mock_processor1 = mock.Mock(spec=trace.SpanProcessor) tracer_provider.add_span_processor(mock_processor1) mock_processor2 = mock.Mock(spec=trace.SpanProcessor) tracer_provider.add_span_processor(mock_processor2) tracer_provider.shutdown() self.assertEqual(mock_processor1.shutdown.call_count, 1) self.assertEqual(mock_processor2.shutdown.call_count, 1) shutdown_python_code = """ import atexit from unittest import mock from opentelemetry.sdk import trace mock_processor = mock.Mock(spec=trace.SpanProcessor) def print_shutdown_count(): print(mock_processor.shutdown.call_count) # atexit hooks are called in inverse order they are added, so do this before # creating the tracer atexit.register(print_shutdown_count) tracer_provider = trace.TracerProvider({tracer_parameters}) tracer_provider.add_span_processor(mock_processor) {tracer_shutdown} """ def run_general_code(shutdown_on_exit, explicit_shutdown): tracer_parameters = "" tracer_shutdown = "" if not shutdown_on_exit: tracer_parameters = "shutdown_on_exit=False" if explicit_shutdown: tracer_shutdown = "tracer_provider.shutdown()" return subprocess.check_output( [ # use shutil to avoid calling python outside the # virtualenv on windows. 
shutil.which("python"), "-c", shutdown_python_code.format( tracer_parameters=tracer_parameters, tracer_shutdown=tracer_shutdown, ), ] ) # test default shutdown_on_exit (True) out = run_general_code(True, False) self.assertTrue(out.startswith(b"1")) # test that shutdown is called only once even if Tracer.shutdown is # called explicitly out = run_general_code(True, True) self.assertTrue(out.startswith(b"1")) # test shutdown_on_exit=False out = run_general_code(False, False) self.assertTrue(out.startswith(b"0")) def test_tracer_provider_accepts_concurrent_multi_span_processor(self): span_processor = trace.ConcurrentMultiSpanProcessor(2) tracer_provider = trace.TracerProvider( active_span_processor=span_processor ) # pylint: disable=protected-access self.assertEqual( span_processor, tracer_provider._active_span_processor ) def test_get_tracer_sdk(self): tracer_provider = trace.TracerProvider() tracer = tracer_provider.get_tracer( "module_name", "library_version", "schema_url", {"key1": "value1", "key2": 6}, ) # pylint: disable=protected-access self.assertEqual(tracer._instrumentation_scope._name, "module_name") # pylint: disable=protected-access self.assertEqual( tracer._instrumentation_scope._version, "library_version" ) # pylint: disable=protected-access self.assertEqual( tracer._instrumentation_scope._schema_url, "schema_url" ) # pylint: disable=protected-access self.assertEqual( tracer._instrumentation_scope._attributes, {"key1": "value1", "key2": 6}, ) @mock.patch.dict("os.environ", {OTEL_SDK_DISABLED: "true"}) def test_get_tracer_with_sdk_disabled(self): tracer_provider = trace.TracerProvider() self.assertIsInstance( tracer_provider.get_tracer(Mock()), trace_api.NoOpTracer ) class TestTracerSampling(unittest.TestCase): def tearDown(self): reload(trace) def test_default_sampler(self): tracer = new_tracer() # Check that the default tracer creates real spans via the default # sampler root_span = tracer.start_span(name="root span", context=None) ctx = trace_api.set_span_in_context(root_span) self.assertIsInstance(root_span, trace.Span) child_span = tracer.start_span(name="child span", context=ctx) self.assertIsInstance(child_span, trace.Span) self.assertTrue(root_span.context.trace_flags.sampled) self.assertEqual( root_span.get_span_context().trace_flags, trace_api.TraceFlags.SAMPLED, ) self.assertEqual( child_span.get_span_context().trace_flags, trace_api.TraceFlags.SAMPLED, ) def test_default_sampler_type(self): tracer_provider = trace.TracerProvider() self.verify_default_sampler(tracer_provider) @mock.patch("opentelemetry.sdk.trace.sampling._get_from_env_or_default") def test_sampler_no_sampling(self, _get_from_env_or_default): tracer_provider = trace.TracerProvider(ALWAYS_OFF) tracer = tracer_provider.get_tracer(__name__) # Check that the default tracer creates no-op spans if the sampler # decides not to sampler root_span = tracer.start_span(name="root span", context=None) ctx = trace_api.set_span_in_context(root_span) self.assertIsInstance(root_span, trace_api.NonRecordingSpan) child_span = tracer.start_span(name="child span", context=ctx) self.assertIsInstance(child_span, trace_api.NonRecordingSpan) self.assertEqual( root_span.get_span_context().trace_flags, trace_api.TraceFlags.DEFAULT, ) self.assertEqual( child_span.get_span_context().trace_flags, trace_api.TraceFlags.DEFAULT, ) self.assertFalse(_get_from_env_or_default.called) @mock.patch.dict("os.environ", {OTEL_TRACES_SAMPLER: "always_off"}) def test_sampler_with_env(self): # pylint: disable=protected-access reload(trace) 
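

# --- Editor's illustrative sketch (not part of the original test module).
# The reload(trace) dance in the surrounding tests exists because
# OTEL_TRACES_SAMPLER / OTEL_TRACES_SAMPLER_ARG are read when the SDK trace
# module is imported; in an application the variables are simply exported
# before startup.
def _demo_sampler_from_env():
    import os
    from importlib import reload

    from opentelemetry.sdk import trace as sdk_trace

    os.environ["OTEL_TRACES_SAMPLER"] = "always_off"
    # Re-evaluate the module-level default sampler from the environment,
    # as the tests above do.
    reload(sdk_trace)
    provider = sdk_trace.TracerProvider()
    return provider.sampler  # resolves to a StaticSampler that drops spans

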
tracer_provider = trace.TracerProvider() self.assertIsInstance(tracer_provider.sampler, StaticSampler) self.assertEqual(tracer_provider.sampler._decision, Decision.DROP) tracer = tracer_provider.get_tracer(__name__) root_span = tracer.start_span(name="root span", context=None) # Should be no-op self.assertIsInstance(root_span, trace_api.NonRecordingSpan) @mock.patch.dict( "os.environ", { OTEL_TRACES_SAMPLER: "parentbased_traceidratio", OTEL_TRACES_SAMPLER_ARG: "0.25", }, ) def test_ratio_sampler_with_env(self): # pylint: disable=protected-access reload(trace) tracer_provider = trace.TracerProvider() self.assertIsInstance(tracer_provider.sampler, ParentBased) self.assertEqual(tracer_provider.sampler._root.rate, 0.25) def verify_default_sampler(self, tracer_provider): self.assertIsInstance(tracer_provider.sampler, ParentBased) # pylint: disable=protected-access self.assertEqual(tracer_provider.sampler._root, ALWAYS_ON) class TestSpanCreation(unittest.TestCase): def test_start_span_invalid_spancontext(self): """If an invalid span context is passed as the parent, the created span should use a new span id. Invalid span contexts should also not be added as a parent. This eliminates redundant error handling logic in exporters. """ tracer = new_tracer() parent_context = trace_api.set_span_in_context( trace_api.INVALID_SPAN_CONTEXT ) new_span = tracer.start_span("root", context=parent_context) self.assertTrue(new_span.context.is_valid) self.assertIsNone(new_span.parent) def test_instrumentation_info(self): tracer_provider = trace.TracerProvider() schema_url = "https://opentelemetry.io/schemas/1.3.0" tracer1 = tracer_provider.get_tracer("instr1") tracer2 = tracer_provider.get_tracer("instr2", "1.3b3", schema_url) span1 = tracer1.start_span("s1") span2 = tracer2.start_span("s2") with self.assertWarns(DeprecationWarning): self.assertEqual( span1.instrumentation_info, InstrumentationInfo("instr1", "") ) with self.assertWarns(DeprecationWarning): self.assertEqual( span2.instrumentation_info, InstrumentationInfo("instr2", "1.3b3", schema_url), ) with self.assertWarns(DeprecationWarning): self.assertEqual(span2.instrumentation_info.schema_url, schema_url) with self.assertWarns(DeprecationWarning): self.assertEqual(span2.instrumentation_info.version, "1.3b3") with self.assertWarns(DeprecationWarning): self.assertEqual(span2.instrumentation_info.name, "instr2") with self.assertWarns(DeprecationWarning): self.assertLess( span1.instrumentation_info, span2.instrumentation_info ) # Check sortability. 
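

# --- Editor's illustrative sketch (not part of the original test module).
# Constructs in code the samplers that the preceding env-var tests resolve
# to. ParentBased delegates per parent kind, as exec_parent_based in
# test_sampling.py exercises.
def _demo_parent_based_composition():
    from opentelemetry.sdk.trace.sampling import (
        ALWAYS_OFF,
        ALWAYS_ON,
        ParentBased,
        TraceIdRatioBased,
    )

    # Equivalent of OTEL_TRACES_SAMPLER=parentbased_traceidratio with
    # OTEL_TRACES_SAMPLER_ARG=0.25.
    ratio = ParentBased(root=TraceIdRatioBased(0.25))
    # Delegates can be overridden per parent kind (remote/local, sampled/not).
    inverted = ParentBased(
        root=ALWAYS_ON,
        remote_parent_sampled=ALWAYS_OFF,
        local_parent_not_sampled=ALWAYS_ON,
    )
    return ratio, inverted

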
def test_invalid_instrumentation_info(self): tracer_provider = trace.TracerProvider() with self.assertLogs(level=ERROR): tracer1 = tracer_provider.get_tracer("") with self.assertLogs(level=ERROR): tracer2 = tracer_provider.get_tracer(None) self.assertIsInstance( tracer1.instrumentation_info, InstrumentationInfo ) span1 = tracer1.start_span("foo") self.assertTrue(span1.is_recording()) self.assertEqual(tracer1.instrumentation_info.schema_url, "") self.assertEqual(tracer1.instrumentation_info.version, "") self.assertEqual(tracer1.instrumentation_info.name, "") self.assertIsInstance( tracer2.instrumentation_info, InstrumentationInfo ) span2 = tracer2.start_span("bar") self.assertTrue(span2.is_recording()) self.assertEqual(tracer2.instrumentation_info.schema_url, "") self.assertEqual(tracer2.instrumentation_info.version, "") self.assertEqual(tracer2.instrumentation_info.name, "") self.assertEqual( tracer1.instrumentation_info, tracer2.instrumentation_info ) def test_span_processor_for_source(self): tracer_provider = trace.TracerProvider() tracer1 = tracer_provider.get_tracer("instr1") tracer2 = tracer_provider.get_tracer("instr2", "1.3b3") span1 = tracer1.start_span("s1") span2 = tracer2.start_span("s2") # pylint:disable=protected-access self.assertIs( span1._span_processor, tracer_provider._active_span_processor ) self.assertIs( span2._span_processor, tracer_provider._active_span_processor ) def test_start_span_implicit(self): tracer = new_tracer() self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) root = tracer.start_span("root") self.assertIsNotNone(root.start_time) self.assertIsNone(root.end_time) self.assertEqual(root.kind, trace_api.SpanKind.INTERNAL) with trace_api.use_span(root, True): self.assertIs(trace_api.get_current_span(), root) with tracer.start_span( "child", kind=trace_api.SpanKind.CLIENT ) as child: self.assertIs(child.parent, root.get_span_context()) self.assertEqual(child.kind, trace_api.SpanKind.CLIENT) self.assertIsNotNone(child.start_time) self.assertIsNone(child.end_time) # The new child span should inherit the parent's context but # get a new span ID. root_context = root.get_span_context() child_context = child.get_span_context() self.assertEqual(root_context.trace_id, child_context.trace_id) self.assertNotEqual( root_context.span_id, child_context.span_id ) self.assertEqual( root_context.trace_state, child_context.trace_state ) self.assertEqual( root_context.trace_flags, child_context.trace_flags ) # Verify start_span() did not set the current span. self.assertIs(trace_api.get_current_span(), root) self.assertIsNotNone(child.end_time) self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) self.assertIsNotNone(root.end_time) def test_start_span_explicit(self): tracer = new_tracer() other_parent = trace._Span( "name", trace_api.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=0x00000000DEADBEF0, is_remote=False, trace_flags=trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED), ), ) other_parent_context = trace_api.set_span_in_context(other_parent) self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) root = tracer.start_span("root") self.assertIsNotNone(root.start_time) self.assertIsNone(root.end_time) # Test with the implicit root span with trace_api.use_span(root, True): self.assertIs(trace_api.get_current_span(), root) with tracer.start_span("stepchild", other_parent_context) as child: # The child's parent should be the one passed in, # not the current span. 
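

# --- Editor's illustrative sketch (not part of the original test module).
# The explicit-parent pattern the surrounding tests verify: passing a
# Context parents the new span without touching the "current span" slot.
# The tracer and span names are arbitrary demo values.
def _demo_explicit_parent():
    from opentelemetry import trace as trace_api
    from opentelemetry.sdk.trace import TracerProvider

    tracer = TracerProvider().get_tracer("parenting-demo")
    parent = tracer.start_span("parent")
    child = tracer.start_span(
        "child", context=trace_api.set_span_in_context(parent)
    )
    # Same trace, new span ID; neither span was made current.
    assert child.parent == parent.get_span_context()
    assert trace_api.get_current_span() == trace_api.INVALID_SPAN
    child.end()
    parent.end()

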
self.assertNotEqual(child.parent, root) self.assertIs(child.parent, other_parent.get_span_context()) self.assertIsNotNone(child.start_time) self.assertIsNone(child.end_time) # The child should inherit its context from the explicit # parent, not the current span. child_context = child.get_span_context() self.assertEqual( other_parent.get_span_context().trace_id, child_context.trace_id, ) self.assertNotEqual( other_parent.get_span_context().span_id, child_context.span_id, ) self.assertEqual( other_parent.get_span_context().trace_state, child_context.trace_state, ) self.assertEqual( other_parent.get_span_context().trace_flags, child_context.trace_flags, ) # Verify start_span() did not set the current span. self.assertIs(trace_api.get_current_span(), root) # Verify ending the child did not set the current span. self.assertIs(trace_api.get_current_span(), root) self.assertIsNotNone(child.end_time) def test_start_as_current_span_implicit(self): tracer = new_tracer() self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) with tracer.start_as_current_span("root") as root: self.assertIs(trace_api.get_current_span(), root) with tracer.start_as_current_span("child") as child: self.assertIs(trace_api.get_current_span(), child) self.assertIs(child.parent, root.get_span_context()) # After exiting the child's scope the parent should become the # current span again. self.assertIs(trace_api.get_current_span(), root) self.assertIsNotNone(child.end_time) self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) self.assertIsNotNone(root.end_time) def test_start_as_current_span_explicit(self): tracer = new_tracer() other_parent = trace._Span( "name", trace_api.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=0x00000000DEADBEF0, is_remote=False, trace_flags=trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED), ), ) other_parent_ctx = trace_api.set_span_in_context(other_parent) self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) # Test with the implicit root span with tracer.start_as_current_span("root") as root: self.assertIs(trace_api.get_current_span(), root) self.assertIsNotNone(root.start_time) self.assertIsNone(root.end_time) with tracer.start_as_current_span( "stepchild", other_parent_ctx ) as child: # The child should become the current span as usual, but its # parent should be the one passed in, not the # previously-current span. self.assertIs(trace_api.get_current_span(), child) self.assertNotEqual(child.parent, root) self.assertIs(child.parent, other_parent.get_span_context()) # After exiting the child's scope the last span on the stack should # become current, not the child's parent. self.assertNotEqual(trace_api.get_current_span(), other_parent) self.assertIs(trace_api.get_current_span(), root) self.assertIsNotNone(child.end_time) def test_start_as_current_span_decorator(self): tracer = new_tracer() self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) @tracer.start_as_current_span("root") def func(): root = trace_api.get_current_span() with tracer.start_as_current_span("child") as child: self.assertIs(trace_api.get_current_span(), child) self.assertIs(child.parent, root.get_span_context()) # After exiting the child's scope the parent should become the # current span again. 
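

# --- Editor's illustrative sketch (not part of the original test module).
# The decorator form verified by test_start_as_current_span_decorator below:
# start_as_current_span can wrap a function, creating a fresh span around
# every call.
def _demo_decorated_function():
    from opentelemetry.sdk.trace import TracerProvider

    tracer = TracerProvider().get_tracer("decorator-demo")

    @tracer.start_as_current_span("do-work")
    def do_work():
        pass  # runs with "do-work" as the current span

    do_work()  # first span
    do_work()  # a second, distinct span

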
self.assertIs(trace_api.get_current_span(), root) self.assertIsNotNone(child.end_time) return root root1 = func() self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) self.assertIsNotNone(root1.end_time) # Second call must create a new span root2 = func() self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) self.assertIsNotNone(root2.end_time) self.assertIsNot(root1, root2) def test_start_as_current_span_no_end_on_exit(self): tracer = new_tracer() with tracer.start_as_current_span("root", end_on_exit=False) as root: self.assertIsNone(root.end_time) self.assertIsNone(root.end_time) def test_explicit_span_resource(self): resource = resources.Resource.create({}) tracer_provider = trace.TracerProvider(resource=resource) tracer = tracer_provider.get_tracer(__name__) span = tracer.start_span("root") self.assertIs(span.resource, resource) def test_default_span_resource(self): tracer_provider = trace.TracerProvider() tracer = tracer_provider.get_tracer(__name__) span = tracer.start_span("root") # pylint: disable=protected-access self.assertIsInstance(span.resource, resources.Resource) self.assertEqual( span.resource.attributes.get(resources.SERVICE_NAME), "unknown_service", ) self.assertEqual( span.resource.attributes.get(resources.TELEMETRY_SDK_LANGUAGE), "python", ) self.assertEqual( span.resource.attributes.get(resources.TELEMETRY_SDK_NAME), "opentelemetry", ) self.assertEqual( span.resource.attributes.get(resources.TELEMETRY_SDK_VERSION), resources._OPENTELEMETRY_SDK_VERSION, ) def test_span_context_remote_flag(self): tracer = new_tracer() span = tracer.start_span("foo") self.assertFalse(span.context.is_remote) def test_disallow_direct_span_creation(self): with self.assertRaises(TypeError): # pylint: disable=abstract-class-instantiated trace.Span("name", mock.Mock(spec=trace_api.SpanContext)) def test_surplus_span_links(self): # pylint: disable=protected-access max_links = trace.SpanLimits().max_links links = [ trace_api.Link(trace_api.SpanContext(0x1, idx, is_remote=False)) for idx in range(0, 16 + max_links) ] tracer = new_tracer() with tracer.start_as_current_span("span", links=links) as root: self.assertEqual(len(root.links), max_links) def test_surplus_span_attributes(self): # pylint: disable=protected-access max_attrs = trace.SpanLimits().max_span_attributes attributes = {str(idx): idx for idx in range(0, 16 + max_attrs)} tracer = new_tracer() with tracer.start_as_current_span( "span", attributes=attributes ) as root: self.assertEqual(len(root.attributes), max_attrs) class TestReadableSpan(unittest.TestCase): def test_links(self): span = trace.ReadableSpan("test") self.assertEqual(span.links, ()) span = trace.ReadableSpan( "test", links=[trace_api.Link(context=trace_api.INVALID_SPAN_CONTEXT)] * 2, ) self.assertEqual(len(span.links), 2) for link in span.links: self.assertFalse(link.context.is_valid) def test_events(self): span = trace.ReadableSpan("test") self.assertEqual(span.events, ()) events = [ trace.Event("foo1", {"bar1": "baz1"}), trace.Event("foo2", {"bar2": "baz2"}), ] span = trace.ReadableSpan("test", events=events) self.assertEqual(span.events, tuple(events)) def test_event_dropped_attributes(self): event1 = trace.Event( "foo1", BoundedAttributes(0, attributes={"bar1": "baz1"}) ) self.assertEqual(event1.dropped_attributes, 1) event2 = trace.Event("foo2", {"bar2": "baz2"}) self.assertEqual(event2.dropped_attributes, 0) def test_link_dropped_attributes(self): link1 = trace_api.Link( mock.Mock(spec=trace_api.SpanContext), BoundedAttributes(0, 
attributes={"bar1": "baz1"}), ) self.assertEqual(link1.dropped_attributes, 1) link2 = trace_api.Link( mock.Mock(spec=trace_api.SpanContext), {"bar2": "baz2"}, ) self.assertEqual(link2.dropped_attributes, 0) class DummyError(Exception): pass class TestSpan(unittest.TestCase): # pylint: disable=too-many-public-methods def setUp(self): self.tracer = new_tracer() def test_basic_span(self): span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext)) self.assertEqual(span.name, "name") def test_attributes(self): with self.tracer.start_as_current_span("root") as root: root.set_attributes( { "http.request.method": "GET", "url.full": "https://example.com:779/path/12/?q=d#123", } ) root.set_attribute("http.response.status_code", 200) root.set_attribute("http.status_text", "OK") root.set_attribute("misc.pi", 3.14) # Setting an attribute with the same key as an existing attribute # SHOULD overwrite the existing attribute's value. root.set_attribute("attr-key", "attr-value1") root.set_attribute("attr-key", "attr-value2") root.set_attribute("empty-list", []) list_of_bools = [True, True, False] root.set_attribute("list-of-bools", list_of_bools) list_of_numerics = [123, 314, 0] root.set_attribute("list-of-numerics", list_of_numerics) self.assertEqual(len(root.attributes), 9) self.assertEqual(root.attributes["http.request.method"], "GET") self.assertEqual( root.attributes["url.full"], "https://example.com:779/path/12/?q=d#123", ) self.assertEqual(root.attributes["http.response.status_code"], 200) self.assertEqual(root.attributes["http.status_text"], "OK") self.assertEqual(root.attributes["misc.pi"], 3.14) self.assertEqual(root.attributes["attr-key"], "attr-value2") self.assertEqual(root.attributes["empty-list"], ()) self.assertEqual( root.attributes["list-of-bools"], (True, True, False) ) list_of_bools.append(False) self.assertEqual( root.attributes["list-of-bools"], (True, True, False) ) self.assertEqual( root.attributes["list-of-numerics"], (123, 314, 0) ) list_of_numerics.append(227) self.assertEqual( root.attributes["list-of-numerics"], (123, 314, 0) ) attributes = { "attr-key": "val", "attr-key2": "val2", "attr-in-both": "span-attr", } with self.tracer.start_as_current_span( "root2", attributes=attributes ) as root: self.assertEqual(len(root.attributes), 3) self.assertEqual(root.attributes["attr-key"], "val") self.assertEqual(root.attributes["attr-key2"], "val2") self.assertEqual(root.attributes["attr-in-both"], "span-attr") def test_invalid_attribute_values(self): with self.tracer.start_as_current_span("root") as root: with self.assertLogs(level=WARNING): root.set_attributes( {"correct-value": "foo", "non-primitive-data-type": {}} ) with self.assertLogs(level=WARNING): root.set_attribute("non-primitive-data-type", {}) with self.assertLogs(level=WARNING): root.set_attribute( "list-of-mixed-data-types-numeric-first", [123, False, "string"], ) with self.assertLogs(level=WARNING): root.set_attribute( "list-of-mixed-data-types-non-numeric-first", [False, 123, "string"], ) with self.assertLogs(level=WARNING): root.set_attribute( "list-with-non-primitive-data-type", [{}, 123] ) with self.assertLogs(level=WARNING): root.set_attribute("list-with-numeric-and-bool", [1, True]) with self.assertLogs(level=WARNING): root.set_attribute("", 123) with self.assertLogs(level=WARNING): root.set_attribute(None, 123) self.assertEqual(len(root.attributes), 1) self.assertEqual(root.attributes["correct-value"], "foo") def test_byte_type_attribute_value(self): with self.tracer.start_as_current_span("root") as root: with 
self.assertLogs(level=WARNING): root.set_attribute( "invalid-byte-type-attribute", b"\xd8\xe1\xb7\xeb\xa8\xe5 \xd2\xb7\xe1", ) self.assertFalse( "invalid-byte-type-attribute" in root.attributes ) root.set_attribute("valid-byte-type-attribute", b"valid byte") self.assertTrue( isinstance(root.attributes["valid-byte-type-attribute"], str) ) def test_sampling_attributes(self): sampling_attributes = { "sampler-attr": "sample-val", "attr-in-both": "decision-attr", } tracer_provider = trace.TracerProvider( StaticSampler(Decision.RECORD_AND_SAMPLE) ) self.tracer = tracer_provider.get_tracer(__name__) with self.tracer.start_as_current_span( name="root2", attributes=sampling_attributes ) as root: self.assertEqual(len(root.attributes), 2) self.assertEqual(root.attributes["sampler-attr"], "sample-val") self.assertEqual(root.attributes["attr-in-both"], "decision-attr") self.assertEqual( root.get_span_context().trace_flags, trace_api.TraceFlags.SAMPLED, ) def test_events(self): self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) with self.tracer.start_as_current_span("root") as root: # only event name root.add_event("event0") # event name and attributes root.add_event( "event1", {"name": "pluto", "some_bools": [True, False]} ) # event name, attributes and timestamp now = time_ns() root.add_event("event2", {"name": ["birthday"]}, now) mutable_list = ["original_contents"] root.add_event("event3", {"name": mutable_list}) self.assertEqual(len(root.events), 4) self.assertEqual(root.events[0].name, "event0") self.assertEqual(root.events[0].attributes, {}) self.assertEqual(root.events[1].name, "event1") self.assertEqual( root.events[1].attributes, {"name": "pluto", "some_bools": (True, False)}, ) self.assertEqual(root.events[2].name, "event2") self.assertEqual( root.events[2].attributes, {"name": ("birthday",)} ) self.assertEqual(root.events[2].timestamp, now) self.assertEqual(root.events[3].name, "event3") self.assertEqual( root.events[3].attributes, {"name": ("original_contents",)} ) mutable_list = ["new_contents"] self.assertEqual( root.events[3].attributes, {"name": ("original_contents",)} ) def test_events_are_immutable(self): event_properties = [ prop for prop in dir(trace.EventBase) if not prop.startswith("_") ] with self.tracer.start_as_current_span("root") as root: root.add_event("event0", {"name": ["birthday"]}) event = root.events[0] for prop in event_properties: with self.assertRaises(AttributeError): setattr(event, prop, "something") def test_event_attributes_are_immutable(self): with self.tracer.start_as_current_span("root") as root: root.add_event("event0", {"name": ["birthday"]}) event = root.events[0] with self.assertRaises(TypeError): event.attributes["name"][0] = "happy" with self.assertRaises(TypeError): event.attributes["name"] = "hello" def test_invalid_event_attributes(self): self.assertEqual(trace_api.get_current_span(), trace_api.INVALID_SPAN) with self.tracer.start_as_current_span("root") as root: with self.assertLogs(level=WARNING): root.add_event( "event0", {"attr1": True, "attr2": ["hi", False]} ) with self.assertLogs(level=WARNING): root.add_event("event0", {"attr1": {}}) with self.assertLogs(level=WARNING): root.add_event("event0", {"attr1": [[True]]}) with self.assertLogs(level=WARNING): root.add_event("event0", {"attr1": [{}], "attr2": [1, 2]}) self.assertEqual(len(root.events), 4) self.assertEqual(root.events[0].attributes, {"attr1": True}) self.assertEqual(root.events[1].attributes, {}) self.assertEqual(root.events[2].attributes, {}) 
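

# --- Editor's illustrative sketch (not part of the original test module).
# The add_event call shapes validated by test_events above: name only, name
# plus attributes, and an explicit nanosecond timestamp. List-valued
# attributes are frozen to tuples on the recorded event.
def _demo_span_events():
    from time import time_ns

    from opentelemetry.sdk.trace import TracerProvider

    tracer = TracerProvider().get_tracer("events-demo")
    with tracer.start_as_current_span("root") as span:
        span.add_event("event0")
        span.add_event(
            "event1", {"name": "pluto", "some_bools": [True, False]}
        )
        span.add_event("event2", {"name": ["birthday"]}, time_ns())

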
self.assertEqual(root.events[3].attributes, {"attr2": (1, 2)}) def test_links(self): id_generator = RandomIdGenerator() other_context1 = trace_api.SpanContext( trace_id=id_generator.generate_trace_id(), span_id=id_generator.generate_span_id(), is_remote=False, ) other_context2 = trace_api.SpanContext( trace_id=id_generator.generate_trace_id(), span_id=id_generator.generate_span_id(), is_remote=False, ) links = ( trace_api.Link(other_context1), trace_api.Link(other_context2, {"name": "neighbor"}), ) with self.tracer.start_as_current_span("root", links=links) as root: self.assertEqual(len(root.links), 2) self.assertEqual( root.links[0].context.trace_id, other_context1.trace_id ) self.assertEqual( root.links[0].context.span_id, other_context1.span_id ) self.assertEqual(0, len(root.links[0].attributes)) self.assertEqual( root.links[1].context.trace_id, other_context2.trace_id ) self.assertEqual( root.links[1].context.span_id, other_context2.span_id ) self.assertEqual(root.links[1].attributes, {"name": "neighbor"}) with self.assertRaises(TypeError): root.links[1].attributes["name"] = "new_neighbour" def test_add_link(self): id_generator = RandomIdGenerator() other_context = trace_api.SpanContext( trace_id=id_generator.generate_trace_id(), span_id=id_generator.generate_span_id(), is_remote=False, ) with self.tracer.start_as_current_span("root") as root: root.add_link(other_context, {"name": "neighbor"}) self.assertEqual(len(root.links), 1) self.assertEqual( root.links[0].context.trace_id, other_context.trace_id ) self.assertEqual( root.links[0].context.span_id, other_context.span_id ) self.assertEqual(root.links[0].attributes, {"name": "neighbor"}) with self.assertRaises(TypeError): root.links[0].attributes["name"] = "new_neighbour" def test_add_link_with_invalid_span_context(self): other_context = trace_api.INVALID_SPAN_CONTEXT with self.tracer.start_as_current_span("root") as root: root.add_link(other_context) root.add_link(None) self.assertEqual(len(root.links), 0) with self.tracer.start_as_current_span( "root", links=[trace_api.Link(other_context), None] ) as root: self.assertEqual(len(root.links), 0) def test_add_link_with_invalid_span_context_with_attributes(self): invalid_context = trace_api.INVALID_SPAN_CONTEXT with self.tracer.start_as_current_span("root") as root: root.add_link(invalid_context) root.add_link(invalid_context, {"name": "neighbor"}) self.assertEqual(len(root.links), 1) self.assertEqual(root.links[0].attributes, {"name": "neighbor"}) with self.tracer.start_as_current_span( "root", links=[ trace_api.Link(invalid_context, {"name": "neighbor"}), trace_api.Link(invalid_context), ], ) as root: self.assertEqual(len(root.links), 1) def test_add_link_with_invalid_span_context_with_tracestate(self): invalid_context = trace.SpanContext( trace_api.INVALID_TRACE_ID, trace_api.INVALID_SPAN_ID, is_remote=False, trace_state="foo=bar", ) with self.tracer.start_as_current_span("root") as root: root.add_link(invalid_context) root.add_link(trace_api.INVALID_SPAN_CONTEXT) self.assertEqual(len(root.links), 1) self.assertEqual(root.links[0].context.trace_state, "foo=bar") with self.tracer.start_as_current_span( "root", links=[ trace_api.Link(invalid_context), trace_api.Link(trace_api.INVALID_SPAN_CONTEXT), ], ) as root: self.assertEqual(len(root.links), 1) def test_update_name(self): with self.tracer.start_as_current_span("root") as root: # name root.update_name("toor") self.assertEqual(root.name, "toor") def test_start_span(self): """Start twice, end a not started""" span = trace._Span("name", 
mock.Mock(spec=trace_api.SpanContext)) # end not started span self.assertRaises(RuntimeError, span.end) span.start() start_time = span.start_time with self.assertLogs(level=WARNING): span.start() self.assertEqual(start_time, span.start_time) self.assertIsNotNone(span.status) self.assertIs(span.status.status_code, trace_api.StatusCode.UNSET) # status new_status = trace_api.status.Status( trace_api.StatusCode.ERROR, "Test description" ) span.set_status(new_status) self.assertIs(span.status.status_code, trace_api.StatusCode.ERROR) self.assertIs(span.status.description, "Test description") def test_start_accepts_context(self): # pylint: disable=no-self-use span_processor = mock.Mock(spec=trace.SpanProcessor) span = trace._Span( "name", mock.Mock(spec=trace_api.SpanContext), span_processor=span_processor, ) context = Context() span.start(parent_context=context) span_processor.on_start.assert_called_once_with( span, parent_context=context ) def test_span_override_start_and_end_time(self): """Span sending custom start_time and end_time values""" span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext)) start_time = 123 span.start(start_time) self.assertEqual(start_time, span.start_time) end_time = 456 span.end(end_time) self.assertEqual(end_time, span.end_time) def test_span_set_status(self): span1 = self.tracer.start_span("span1") span1.set_status(Status(status_code=StatusCode.ERROR)) self.assertEqual(span1.status.status_code, StatusCode.ERROR) self.assertEqual(span1.status.description, None) span2 = self.tracer.start_span("span2") span2.set_status( Status(status_code=StatusCode.ERROR, description="desc") ) self.assertEqual(span2.status.status_code, StatusCode.ERROR) self.assertEqual(span2.status.description, "desc") span3 = self.tracer.start_span("span3") span3.set_status(StatusCode.ERROR) self.assertEqual(span3.status.status_code, StatusCode.ERROR) self.assertEqual(span3.status.description, None) span4 = self.tracer.start_span("span4") span4.set_status(StatusCode.ERROR, "span4 desc") self.assertEqual(span4.status.status_code, StatusCode.ERROR) self.assertEqual(span4.status.description, "span4 desc") span5 = self.tracer.start_span("span5") with self.assertLogs(level=WARNING): span5.set_status( Status(status_code=StatusCode.ERROR, description="desc"), description="ignored", ) self.assertEqual(span5.status.status_code, StatusCode.ERROR) self.assertEqual(span5.status.description, "desc") def test_ended_span(self): """Events, attributes are not allowed after span is ended""" root = self.tracer.start_span("root") # everything should be empty at the beginning self.assertEqual(len(root.attributes), 0) self.assertEqual(len(root.events), 0) self.assertEqual(len(root.links), 0) # call end first time root.end() end_time0 = root.end_time # call it a second time with self.assertLogs(level=WARNING): root.end() # end time shouldn't be changed self.assertEqual(end_time0, root.end_time) with self.assertLogs(level=WARNING): root.set_attribute("http.request.method", "GET") self.assertEqual(len(root.attributes), 0) with self.assertLogs(level=WARNING): root.add_event("event1") self.assertEqual(len(root.events), 0) with self.assertLogs(level=WARNING): root.update_name("xxx") self.assertEqual(root.name, "root") new_status = trace_api.status.Status( trace_api.StatusCode.ERROR, "Test description" ) with self.assertLogs(level=WARNING): root.set_status(new_status) self.assertEqual(root.status.status_code, trace_api.StatusCode.UNSET) def test_error_status(self): def error_status_test(context): with 
self.assertRaises(AssertionError):
                with context as root:
                    raise AssertionError("unknown")

            self.assertIs(root.status.status_code, StatusCode.ERROR)
            self.assertEqual(root.status.description, "AssertionError: unknown")

        error_status_test(
            trace.TracerProvider().get_tracer(__name__).start_span("root")
        )
        error_status_test(
            trace.TracerProvider()
            .get_tracer(__name__)
            .start_as_current_span("root")
        )

    def test_status_cannot_override_ok(self):
        def error_status_test(context):
            with self.assertRaises(AssertionError):
                with context as root:
                    root.set_status(trace_api.status.Status(StatusCode.OK))
                    raise AssertionError("unknown")

            # the OK status set before the exception wins
            self.assertIs(root.status.status_code, StatusCode.OK)
            self.assertIsNone(root.status.description)

        error_status_test(
            trace.TracerProvider().get_tracer(__name__).start_span("root")
        )
        error_status_test(
            trace.TracerProvider()
            .get_tracer(__name__)
            .start_as_current_span("root")
        )

    def test_status_cannot_set_unset(self):
        def unset_status_test(context):
            with self.assertRaises(AssertionError):
                with context as root:
                    raise AssertionError("unknown")

            # UNSET cannot replace the ERROR recorded by the exception
            root.set_status(trace_api.status.Status(StatusCode.UNSET))
            self.assertIs(root.status.status_code, StatusCode.ERROR)
            self.assertEqual(root.status.description, "AssertionError: unknown")

        with self.assertLogs(level=WARNING):
            unset_status_test(
                trace.TracerProvider().get_tracer(__name__).start_span("root")
            )
        with self.assertLogs(level=WARNING):
            unset_status_test(
                trace.TracerProvider()
                .get_tracer(__name__)
                .start_as_current_span("root")
            )

    def test_last_status_wins(self):
        def error_status_test(context):
            with self.assertRaises(AssertionError):
                with context as root:
                    raise AssertionError("unknown")

            # OK set afterwards overrides the ERROR recorded by the exception
            root.set_status(trace_api.status.Status(StatusCode.OK))
            self.assertIs(root.status.status_code, StatusCode.OK)
            self.assertIsNone(root.status.description)

        error_status_test(
            trace.TracerProvider().get_tracer(__name__).start_span("root")
        )
        error_status_test(
            trace.TracerProvider()
            .get_tracer(__name__)
            .start_as_current_span("root")
        )

    def test_record_exception_fqn(self):
        span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext))
        exception = DummyError("error")
        exception_type = "tests.trace.test_trace.DummyError"
        span.record_exception(exception)
        exception_event = span.events[0]
        self.assertEqual("exception", exception_event.name)
        self.assertEqual("error", exception_event.attributes["exception.message"])
        self.assertEqual(
            exception_type, exception_event.attributes["exception.type"]
        )
        self.assertIn(
            "DummyError: error",
            exception_event.attributes["exception.stacktrace"],
        )

    def test_record_exception(self):
        span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext))
        try:
            raise ValueError("invalid")
        except ValueError as err:
            span.record_exception(err)
        exception_event = span.events[0]
        self.assertEqual("exception", exception_event.name)
        self.assertEqual("invalid", exception_event.attributes["exception.message"])
        self.assertEqual("ValueError", exception_event.attributes["exception.type"])
        self.assertIn(
            "ValueError: invalid",
            exception_event.attributes["exception.stacktrace"],
        )

    def test_record_exception_with_attributes(self):
        span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext))
        try:
            raise RuntimeError("error")
        except RuntimeError as err:
            attributes = {"has_additional_attributes": True}
            span.record_exception(err, attributes)
        exception_event = span.events[0]
        self.assertEqual("exception", exception_event.name)
        self.assertEqual("error", exception_event.attributes["exception.message"])
        self.assertEqual(
            "RuntimeError", exception_event.attributes["exception.type"]
        )
        self.assertEqual("False", exception_event.attributes["exception.escaped"])
        self.assertIn(
            "RuntimeError: error",
            exception_event.attributes["exception.stacktrace"],
        )
        self.assertIn("has_additional_attributes", exception_event.attributes)
        self.assertEqual(
            True, exception_event.attributes["has_additional_attributes"]
        )

    def test_record_exception_escaped(self):
        span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext))
        try:
            raise RuntimeError("error")
        except RuntimeError as err:
            span.record_exception(exception=err, escaped=True)
        exception_event = span.events[0]
        self.assertEqual("exception", exception_event.name)
        self.assertEqual("error", exception_event.attributes["exception.message"])
        self.assertEqual(
            "RuntimeError", exception_event.attributes["exception.type"]
        )
        self.assertIn(
            "RuntimeError: error",
            exception_event.attributes["exception.stacktrace"],
        )
        self.assertEqual("True", exception_event.attributes["exception.escaped"])

    def test_record_exception_with_timestamp(self):
        span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext))
        try:
            raise RuntimeError("error")
        except RuntimeError as err:
            timestamp = 1604238587112021089
            span.record_exception(err, timestamp=timestamp)
        exception_event = span.events[0]
        self.assertEqual("exception", exception_event.name)
        self.assertEqual("error", exception_event.attributes["exception.message"])
        self.assertEqual(
            "RuntimeError", exception_event.attributes["exception.type"]
        )
        self.assertIn(
            "RuntimeError: error",
            exception_event.attributes["exception.stacktrace"],
        )
        self.assertEqual(1604238587112021089, exception_event.timestamp)

    def test_record_exception_with_attributes_and_timestamp(self):
        span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext))
        try:
            raise RuntimeError("error")
        except RuntimeError as err:
            attributes = {"has_additional_attributes": True}
            timestamp = 1604238587112021089
            span.record_exception(err, attributes, timestamp)
        exception_event = span.events[0]
        self.assertEqual("exception", exception_event.name)
        self.assertEqual("error", exception_event.attributes["exception.message"])
        self.assertEqual(
            "RuntimeError", exception_event.attributes["exception.type"]
        )
        self.assertIn(
            "RuntimeError: error",
            exception_event.attributes["exception.stacktrace"],
        )
        self.assertIn("has_additional_attributes", exception_event.attributes)
        self.assertEqual(
            True, exception_event.attributes["has_additional_attributes"]
        )
        self.assertEqual(1604238587112021089, exception_event.timestamp)

    def test_record_exception_context_manager(self):
        span = None
        try:
            with self.tracer.start_as_current_span("span") as span:
                raise RuntimeError("example error")
        except RuntimeError:
            pass
        finally:
            self.assertEqual(len(span.events), 1)
            event = span.events[0]
            self.assertEqual("exception", event.name)
            self.assertEqual("RuntimeError", event.attributes["exception.type"])
            self.assertEqual(
                "example error", event.attributes["exception.message"]
            )

            stacktrace = """in test_record_exception_context_manager
    raise RuntimeError("example error")
RuntimeError: example error"""
            self.assertIn(stacktrace, event.attributes["exception.stacktrace"])

        try:
            with self.tracer.start_as_current_span(
                "span", record_exception=False
            ) as span:
                raise RuntimeError("example error")
        except RuntimeError:
            pass
        finally:
            self.assertEqual(len(span.events), 0)

    def test_record_exception_out_of_scope(self):
        span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext))
        out_of_scope_exception = ValueError("invalid")
        span.record_exception(out_of_scope_exception)
        exception_event = span.events[0]
        self.assertEqual("exception", exception_event.name)
        self.assertEqual("invalid", exception_event.attributes["exception.message"])
        self.assertEqual("ValueError", exception_event.attributes["exception.type"])
        self.assertIn(
            "ValueError: invalid",
            exception_event.attributes["exception.stacktrace"],
        )


def span_event_start_fmt(span_processor_name, span_name):
    return span_processor_name + ":" + span_name + ":start"


def span_event_end_fmt(span_processor_name, span_name):
    return span_processor_name + ":" + span_name + ":end"


class MySpanProcessor(trace.SpanProcessor):
    def __init__(self, name, span_list):
        self.name = name
        self.span_list = span_list

    def on_start(
        self, span: "trace.Span", parent_context: Optional[Context] = None
    ) -> None:
        self.span_list.append(span_event_start_fmt(self.name, span.name))

    def on_end(self, span: "trace.ReadableSpan") -> None:
        self.span_list.append(span_event_end_fmt(self.name, span.name))


class TestSpanProcessor(unittest.TestCase):
    def test_span_processor(self):
        tracer_provider = trace.TracerProvider()
        tracer = tracer_provider.get_tracer(__name__)

        spans_calls_list = []  # filled by MySpanProcessor
        expected_list = []  # filled by hand

        # Span processors are created but not added to the tracer yet
        sp1 = MySpanProcessor("SP1", spans_calls_list)
        sp2 = MySpanProcessor("SP2", spans_calls_list)

        with tracer.start_as_current_span("foo"):
            with tracer.start_as_current_span("bar"):
                with tracer.start_as_current_span("baz"):
                    pass

        # at this point lists must be empty
        self.assertEqual(len(spans_calls_list), 0)

        # add single span processor
        tracer_provider.add_span_processor(sp1)

        with tracer.start_as_current_span("foo"):
            expected_list.append(span_event_start_fmt("SP1", "foo"))

            with tracer.start_as_current_span("bar"):
                expected_list.append(span_event_start_fmt("SP1", "bar"))

                with tracer.start_as_current_span("baz"):
                    expected_list.append(span_event_start_fmt("SP1", "baz"))

                expected_list.append(span_event_end_fmt("SP1", "baz"))

            expected_list.append(span_event_end_fmt("SP1", "bar"))

        expected_list.append(span_event_end_fmt("SP1", "foo"))

        self.assertListEqual(spans_calls_list, expected_list)

        spans_calls_list.clear()
        expected_list.clear()

        # go for multiple span processors
        tracer_provider.add_span_processor(sp2)

        with tracer.start_as_current_span("foo"):
            expected_list.append(span_event_start_fmt("SP1", "foo"))
            expected_list.append(span_event_start_fmt("SP2", "foo"))

            with tracer.start_as_current_span("bar"):
                expected_list.append(span_event_start_fmt("SP1", "bar"))
                expected_list.append(span_event_start_fmt("SP2", "bar"))

                with tracer.start_as_current_span("baz"):
                    expected_list.append(span_event_start_fmt("SP1", "baz"))
                    expected_list.append(span_event_start_fmt("SP2", "baz"))

                expected_list.append(span_event_end_fmt("SP1", "baz"))
                expected_list.append(span_event_end_fmt("SP2", "baz"))

            expected_list.append(span_event_end_fmt("SP1", "bar"))
            expected_list.append(span_event_end_fmt("SP2", "bar"))

        expected_list.append(span_event_end_fmt("SP1", "foo"))
        expected_list.append(span_event_end_fmt("SP2", "foo"))

        # compare if two lists are the same
        self.assertListEqual(spans_calls_list, expected_list)

    def test_add_span_processor_after_span_creation(self):
        tracer_provider = trace.TracerProvider()
        tracer = tracer_provider.get_tracer(__name__)

        spans_calls_list = []  # filled by MySpanProcessor
        expected_list = []  # filled by hand

        # Span processors are created but not added to the tracer yet
        sp = MySpanProcessor("SP1", spans_calls_list)

        with tracer.start_as_current_span("foo"):
            with tracer.start_as_current_span("bar"):
                with tracer.start_as_current_span("baz"):
                    # add span processor after spans have been created
                    tracer_provider.add_span_processor(sp)

                expected_list.append(span_event_end_fmt("SP1", "baz"))

            expected_list.append(span_event_end_fmt("SP1", "bar"))

        expected_list.append(span_event_end_fmt("SP1", "foo"))

        self.assertListEqual(spans_calls_list, expected_list)

    def test_to_json(self):
        context = trace_api.SpanContext(
            trace_id=0x000000000000000000000000DEADBEEF,
            span_id=0x00000000DEADBEF0,
            is_remote=False,
            trace_flags=trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED),
        )
        parent = trace._Span("parent-name", context, resource=Resource({}))
        span = trace._Span(
            "span-name", context, resource=Resource({}), parent=parent.context
        )

        self.assertEqual(
            span.to_json(),
            """{
    "name": "span-name",
    "context": {
        "trace_id": "0x000000000000000000000000deadbeef",
        "span_id": "0x00000000deadbef0",
        "trace_state": "[]"
    },
    "kind": "SpanKind.INTERNAL",
    "parent_id": "0x00000000deadbef0",
    "start_time": null,
    "end_time": null,
    "status": {
        "status_code": "UNSET"
    },
    "attributes": {},
    "events": [],
    "links": [],
    "resource": {
        "attributes": {},
        "schema_url": ""
    }
}""",
        )
        self.assertEqual(
            span.to_json(indent=None),
            '{"name": "span-name", "context": {"trace_id": "0x000000000000000000000000deadbeef", "span_id": "0x00000000deadbef0", "trace_state": "[]"}, "kind": "SpanKind.INTERNAL", "parent_id": "0x00000000deadbef0", "start_time": null, "end_time": null, "status": {"status_code": "UNSET"}, "attributes": {}, "events": [], "links": [], "resource": {"attributes": {}, "schema_url": ""}}',
        )

    def test_attributes_to_json(self):
        context = trace_api.SpanContext(
            trace_id=0x000000000000000000000000DEADBEEF,
            span_id=0x00000000DEADBEF0,
            is_remote=False,
            trace_flags=trace_api.TraceFlags(trace_api.TraceFlags.SAMPLED),
        )
        span = trace._Span("span-name", context, resource=Resource({}))
        span.set_attribute("key", "value")
        span.add_event("event", {"key2": "value2"}, 123)
        date_str = ns_to_iso_str(123)
        self.assertEqual(
            span.to_json(indent=None),
            '{"name": "span-name", "context": {"trace_id": "0x000000000000000000000000deadbeef", "span_id": "0x00000000deadbef0", "trace_state": "[]"}, "kind": "SpanKind.INTERNAL", "parent_id": null, "start_time": null, "end_time": null, "status": {"status_code": "UNSET"}, "attributes": {"key": "value"}, "events": [{"name": "event", "timestamp": "'
            + date_str
            + '", "attributes": {"key2": "value2"}}], "links": [], "resource": {"attributes": {}, "schema_url": ""}}',
        )


class TestSpanLimits(unittest.TestCase):
    # pylint: disable=protected-access

    long_val = "v" * 1000

    def _assert_attr_length(self, attr_val, max_len):
        if isinstance(attr_val, str):
            expected = self.long_val
            if max_len is not None:
                expected = expected[:max_len]
            self.assertEqual(attr_val, expected)

    def test_limits_defaults(self):
        limits = trace.SpanLimits()
        self.assertEqual(
            limits.max_attributes,
            trace._DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT,
        )
        self.assertEqual(
            limits.max_span_attributes,
            trace._DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT,
        )
        self.assertEqual(
            limits.max_event_attributes,
            trace._DEFAULT_OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT,
        )
        self.assertEqual(
            limits.max_link_attributes,
            trace._DEFAULT_OTEL_LINK_ATTRIBUTE_COUNT_LIMIT,
        )
        self.assertEqual(
            limits.max_events, trace._DEFAULT_OTEL_SPAN_EVENT_COUNT_LIMIT
        )
        self.assertEqual(
            limits.max_links, trace._DEFAULT_OTEL_SPAN_LINK_COUNT_LIMIT
        )
        self.assertIsNone(limits.max_attribute_length)
        self.assertIsNone(limits.max_span_attribute_length)

    def test_limits_attribute_length_limits_code(self):
        # global limit unset while span limit is set
        limits = trace.SpanLimits(max_span_attribute_length=22)
        self.assertIsNone(limits.max_attribute_length)
        self.assertEqual(limits.max_span_attribute_length, 22)

        # span limit falls back to global limit when no value is provided
        limits = trace.SpanLimits(max_attribute_length=22)
        self.assertEqual(limits.max_attribute_length, 22)
        self.assertEqual(limits.max_span_attribute_length, 22)

        # global and span limits set to different values
        limits = trace.SpanLimits(
            max_attribute_length=22, max_span_attribute_length=33
        )
        self.assertEqual(limits.max_attribute_length, 22)
        self.assertEqual(limits.max_span_attribute_length, 33)

    def test_limits_values_code(self):
        (
            max_attributes,
            max_span_attributes,
            max_link_attributes,
            max_event_attributes,
            max_events,
            max_links,
            max_attr_length,
            max_span_attr_length,
        ) = (
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
        )

        limits = trace.SpanLimits(
            max_events=max_events,
            max_links=max_links,
            max_attributes=max_attributes,
            max_span_attributes=max_span_attributes,
            max_event_attributes=max_event_attributes,
            max_link_attributes=max_link_attributes,
            max_attribute_length=max_attr_length,
            max_span_attribute_length=max_span_attr_length,
        )
        self.assertEqual(limits.max_events, max_events)
        self.assertEqual(limits.max_links, max_links)
        self.assertEqual(limits.max_attributes, max_attributes)
        self.assertEqual(limits.max_span_attributes, max_span_attributes)
        self.assertEqual(limits.max_event_attributes, max_event_attributes)
        self.assertEqual(limits.max_link_attributes, max_link_attributes)
        self.assertEqual(limits.max_attribute_length, max_attr_length)
        self.assertEqual(limits.max_span_attribute_length, max_span_attr_length)

    def test_limits_values_env(self):
        (
            max_attributes,
            max_span_attributes,
            max_link_attributes,
            max_event_attributes,
            max_events,
            max_links,
            max_attr_length,
            max_span_attr_length,
        ) = (
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
            randint(0, 10000),
        )

        with mock.patch.dict(
            "os.environ",
            {
                OTEL_ATTRIBUTE_COUNT_LIMIT: str(max_attributes),
                OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT: str(max_span_attributes),
                OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT: str(max_event_attributes),
                OTEL_LINK_ATTRIBUTE_COUNT_LIMIT: str(max_link_attributes),
                OTEL_SPAN_EVENT_COUNT_LIMIT: str(max_events),
                OTEL_SPAN_LINK_COUNT_LIMIT: str(max_links),
                OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT: str(max_attr_length),
                OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT: str(
                    max_span_attr_length
                ),
            },
        ):
            limits = trace.SpanLimits()

        self.assertEqual(limits.max_events, max_events)
        self.assertEqual(limits.max_links, max_links)
        self.assertEqual(limits.max_attributes, max_attributes)
        self.assertEqual(limits.max_span_attributes, max_span_attributes)
        self.assertEqual(limits.max_event_attributes, max_event_attributes)
        self.assertEqual(limits.max_link_attributes, max_link_attributes)
        self.assertEqual(limits.max_attribute_length, max_attr_length)
        self.assertEqual(limits.max_span_attribute_length, max_span_attr_length)

    @mock.patch.dict(
        "os.environ",
        {
            OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT: "13",
            OTEL_SPAN_EVENT_COUNT_LIMIT: "7",
            OTEL_SPAN_LINK_COUNT_LIMIT: "4",
            OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT: "11",
            OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT: "15",
        },
    )
    def test_span_limits_env(self):
        self._test_span_limits(
            new_tracer(),
            max_attrs=13,
            max_events=7,
            max_links=4,
            max_attr_len=11,
            max_span_attr_len=15,
        )

    @mock.patch.dict(
        "os.environ",
        {
            OTEL_ATTRIBUTE_COUNT_LIMIT: "13",
            OTEL_SPAN_EVENT_COUNT_LIMIT: "7",
            OTEL_SPAN_LINK_COUNT_LIMIT: "4",
            OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT: "11",
        },
    )
    def test_span_limits_global_env(self):
        self._test_span_limits(
            new_tracer(),
            max_attrs=13,
            max_events=7,
            max_links=4,
            max_attr_len=11,
            max_span_attr_len=11,
        )

    @mock.patch.dict(
        "os.environ",
        {
            OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT: "10",
            OTEL_SPAN_EVENT_COUNT_LIMIT: "20",
            OTEL_SPAN_LINK_COUNT_LIMIT: "30",
            OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT: "40",
            OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT: "50",
        },
    )
    def test_span_limits_default_to_env(self):
        self._test_span_limits(
            new_tracer(
                span_limits=trace.SpanLimits(
                    max_attributes=None,
                    max_events=None,
                    max_links=None,
                    max_attribute_length=None,
                    max_span_attribute_length=None,
                )
            ),
            max_attrs=10,
            max_events=20,
            max_links=30,
            max_attr_len=40,
            max_span_attr_len=50,
        )

    def test_span_limits_code(self):
        self._test_span_limits(
            new_tracer(
                span_limits=trace.SpanLimits(
                    max_attributes=11,
                    max_events=15,
                    max_links=13,
                    max_attribute_length=9,
                    max_span_attribute_length=25,
                )
            ),
            max_attrs=11,
            max_events=15,
            max_links=13,
            max_attr_len=9,
            max_span_attr_len=25,
        )

    @mock.patch.dict(
        "os.environ",
        {
            OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT: "",
            OTEL_SPAN_EVENT_COUNT_LIMIT: "",
            OTEL_SPAN_LINK_COUNT_LIMIT: "",
            OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT: "",
        },
    )
    def test_span_no_limits_env(self):
        self._test_span_no_limits(new_tracer())

    def test_span_no_limits_code(self):
        self._test_span_no_limits(
            new_tracer(
                span_limits=trace.SpanLimits(
                    max_span_attributes=trace.SpanLimits.UNSET,
                    max_links=trace.SpanLimits.UNSET,
                    max_events=trace.SpanLimits.UNSET,
                    max_attribute_length=trace.SpanLimits.UNSET,
                )
            )
        )

    def test_span_zero_global_limit(self):
        self._test_span_limits(
            new_tracer(
                span_limits=trace.SpanLimits(
                    max_attributes=0,
                    max_events=0,
                    max_links=0,
                )
            ),
            0,
            0,
            0,
            0,
            0,
        )

    def test_span_zero_global_nonzero_model(self):
        self._test_span_limits(
            new_tracer(
                span_limits=trace.SpanLimits(
                    max_attributes=0,
                    max_events=0,
                    max_links=0,
                    max_span_attributes=15,
                    max_span_attribute_length=25,
                )
            ),
            15,
            0,
            0,
            0,
            25,
        )

    def test_span_zero_global_unset_model(self):
        self._test_span_no_limits(
            new_tracer(
                span_limits=trace.SpanLimits(
                    max_attributes=0,
                    max_span_attributes=trace.SpanLimits.UNSET,
                    max_links=trace.SpanLimits.UNSET,
                    max_events=trace.SpanLimits.UNSET,
                    max_attribute_length=trace.SpanLimits.UNSET,
                )
            )
        )

    def test_dropped_attributes(self):
        span = get_span_with_dropped_attributes_events_links()
        self.assertEqual(1, span.dropped_links)
        self.assertEqual(2, span.dropped_attributes)
        self.assertEqual(3, span.dropped_events)
        self.assertEqual(2, span.events[0].dropped_attributes)
        self.assertEqual(2, span.links[0].dropped_attributes)

    def _test_span_limits(
        self,
        tracer,
        max_attrs,
        max_events,
        max_links,
        max_attr_len,
        max_span_attr_len,
    ):
        id_generator = RandomIdGenerator()
        some_links = [
            trace_api.Link(
                trace_api.SpanContext(
                    trace_id=id_generator.generate_trace_id(),
                    span_id=id_generator.generate_span_id(),
                    is_remote=False,
                ),
                attributes={"k": self.long_val},
            )
            for _ in range(100)
        ]

        some_attrs = {
            f"init_attribute_{idx}": self.long_val for idx in range(100)
        }
        with tracer.start_as_current_span(
            "root", links=some_links, attributes=some_attrs
        ) as root:
            self.assertEqual(len(root.links), max_links)
            self.assertEqual(len(root.attributes), max_attrs)
            for idx in range(100):
                root.set_attribute(f"my_str_attribute_{idx}", self.long_val)
                root.set_attribute(
                    f"my_byte_attribute_{idx}", self.long_val.encode()
                )
                # non-string values are exempt from the length limits
                root.set_attribute(f"my_int_attribute_{idx}", idx)
                root.add_event(f"my_event_{idx}", attributes={"k": self.long_val})

            self.assertEqual(len(root.attributes), max_attrs)
            self.assertEqual(len(root.events), max_events)

            for link in root.links:
                for attr_val in link.attributes.values():
                    self._assert_attr_length(attr_val, max_attr_len)

            for event in root.events:
                for attr_val in event.attributes.values():
                    self._assert_attr_length(attr_val, max_attr_len)

            for attr_val in root.attributes.values():
                self._assert_attr_length(attr_val, max_span_attr_len)

    def _test_span_no_limits(self, tracer):
        num_links = int(trace._DEFAULT_OTEL_SPAN_LINK_COUNT_LIMIT) + randint(
            1, 100
        )

        id_generator = RandomIdGenerator()
        some_links = [
            trace_api.Link(
                trace_api.SpanContext(
                    trace_id=id_generator.generate_trace_id(),
                    span_id=id_generator.generate_span_id(),
                    is_remote=False,
                )
            )
            for _ in range(num_links)
        ]
        with tracer.start_as_current_span("root", links=some_links) as root:
            self.assertEqual(len(root.links), num_links)

        num_events = int(trace._DEFAULT_OTEL_SPAN_EVENT_COUNT_LIMIT) + randint(
            1, 100
        )
        with tracer.start_as_current_span("root") as root:
            for idx in range(num_events):
                root.add_event(f"my_event_{idx}", attributes={"k": self.long_val})

            self.assertEqual(len(root.events), num_events)

        num_attributes = int(
            trace._DEFAULT_OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT
        ) + randint(1, 100)
        with tracer.start_as_current_span("root") as root:
            for idx in range(num_attributes):
                root.set_attribute(f"my_attribute_{idx}", self.long_val)

            self.assertEqual(len(root.attributes), num_attributes)
            for attr_val in root.attributes.values():
                self.assertEqual(attr_val, self.long_val)

    def test_invalid_env_vars_raise(self):
        env_vars = [
            OTEL_SPAN_EVENT_COUNT_LIMIT,
            OTEL_SPAN_LINK_COUNT_LIMIT,
            OTEL_ATTRIBUTE_COUNT_LIMIT,
            OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT,
            OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT,
            OTEL_LINK_ATTRIBUTE_COUNT_LIMIT,
            OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT,
        ]
        bad_values = ["bad", "-1"]
        # exercise every (env var, bad value) pair
        for env_var in env_vars:
            for bad_value in bad_values:
                with self.subTest(f"Testing {env_var}={bad_value}"):
                    with self.assertRaises(ValueError) as error, patch.dict(
                        "os.environ", {env_var: bad_value}, clear=True
                    ):
                        trace.SpanLimits()

                    expected_msg = f"{env_var} must be a non-negative integer but got {bad_value}"
                    self.assertEqual(
                        expected_msg,
                        str(error.exception),
                        f"Unexpected error message for {env_var}={bad_value}",
                    )


class TestTraceFlags(unittest.TestCase):
    def test_constant_default(self):
        self.assertEqual(trace_api.TraceFlags.DEFAULT, 0)

    def test_constant_sampled(self):
        self.assertEqual(trace_api.TraceFlags.SAMPLED, 1)

    def test_get_default(self):
        self.assertEqual(
            trace_api.TraceFlags.get_default(), trace_api.TraceFlags.DEFAULT
        )

    def test_sampled_true(self):
        self.assertTrue(trace_api.TraceFlags(0xF1).sampled)

    def test_sampled_false(self):
        self.assertFalse(trace_api.TraceFlags(0xF0).sampled)

    def test_constant_default_trace_options(self):
        self.assertEqual(
            trace_api.DEFAULT_TRACE_OPTIONS, trace_api.TraceFlags.DEFAULT
        )


class TestParentChildSpanException(unittest.TestCase):
    def test_parent_child_span_exception(self):
        """
        Tests that a parent span has its status set to ERROR when a child span
        raises an exception, even when the child span has its
        ``record_exception`` and ``set_status_on_exception`` attributes set to
        ``False``.
        """

        set_tracer_provider(TracerProvider())
        tracer = get_tracer(__name__)

        exception = Exception("exception")
        exception_type = exception.__class__.__name__
        exception_message = exception.args[0]

        try:
            with tracer.start_as_current_span(
                "parent",
            ) as parent_span:
                with tracer.start_as_current_span(
                    "child",
                    record_exception=False,
                    set_status_on_exception=False,
                ) as child_span:
                    raise exception

        except Exception:  # pylint: disable=broad-exception-caught
            pass

        self.assertTrue(child_span.status.is_ok)
        self.assertIsNone(child_span.status.description)
        self.assertTupleEqual(child_span.events, ())

        self.assertFalse(parent_span.status.is_ok)
        self.assertEqual(
            parent_span.status.description,
            f"{exception_type}: {exception_message}",
        )
        self.assertEqual(
            parent_span.events[0].attributes["exception.type"], exception_type
        )
        self.assertEqual(
            parent_span.events[0].attributes["exception.message"],
            exception_message,
        )

    def test_child_parent_span_exception(self):
        """
        Tests that a child span does not have its status set to ERROR when a
        parent span raises an exception and the parent span has its
        ``record_exception`` and ``set_status_on_exception`` attributes set to
        ``False``.
        """

        set_tracer_provider(TracerProvider())
        tracer = get_tracer(__name__)

        exception = Exception("exception")

        try:
            with tracer.start_as_current_span(
                "parent",
                record_exception=False,
                set_status_on_exception=False,
            ) as parent_span:
                with tracer.start_as_current_span(
                    "child",
                ) as child_span:
                    pass
                raise exception

        except Exception:  # pylint: disable=broad-exception-caught
            pass

        self.assertTrue(child_span.status.is_ok)
        self.assertIsNone(child_span.status.description)
        self.assertTupleEqual(child_span.events, ())

        self.assertTrue(parent_span.status.is_ok)
        self.assertIsNone(parent_span.status.description)
        self.assertTupleEqual(parent_span.events, ())


# pylint: disable=protected-access
class TestTracerProvider(unittest.TestCase):
    @patch("opentelemetry.sdk.trace.sampling._get_from_env_or_default")
    @patch.object(Resource, "create")
    def test_tracer_provider_init_default(self, resource_patch, sample_patch):
        tracer_provider = trace.TracerProvider()
        self.assertTrue(
            isinstance(tracer_provider.id_generator, RandomIdGenerator)
        )
        resource_patch.assert_called_once()
        self.assertIsNotNone(tracer_provider._resource)
        sample_patch.assert_called_once()
        self.assertIsNotNone(tracer_provider._span_limits)
        self.assertIsNotNone(tracer_provider._atexit_handler)


class TestRandomIdGenerator(unittest.TestCase):
    _TRACE_ID_MAX_VALUE = 2**128 - 1
    _SPAN_ID_MAX_VALUE = 2**64 - 1

    @patch(
        "random.getrandbits",
        side_effect=[trace_api.INVALID_SPAN_ID, 0x00000000DEADBEF0],
    )
    def test_generate_span_id_avoids_invalid(self, mock_getrandbits):
        generator = RandomIdGenerator()
        span_id = generator.generate_span_id()

        self.assertNotEqual(span_id, trace_api.INVALID_SPAN_ID)
        mock_getrandbits.assert_any_call(64)
        self.assertEqual(mock_getrandbits.call_count, 2)

    @patch(
        "random.getrandbits",
        side_effect=[
            trace_api.INVALID_TRACE_ID,
            0x000000000000000000000000DEADBEEF,
        ],
    )
    def test_generate_trace_id_avoids_invalid(self, mock_getrandbits):
        generator = RandomIdGenerator()
        trace_id = generator.generate_trace_id()

        self.assertNotEqual(trace_id, trace_api.INVALID_TRACE_ID)
        mock_getrandbits.assert_any_call(128)
        self.assertEqual(mock_getrandbits.call_count, 2)

python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/.pylintrc:

[MASTER]

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loaded into the active Python interpreter and may
# run arbitrary code.
extension-pkg-whitelist=

# Add list of files or directories to be excluded. They should be base names,
# not paths.
ignore=CVS,gen,proto

# Add files or directories matching the regex patterns to be excluded. The
# regex matches against base names, not paths.
ignore-patterns=

# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=

# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use.
jobs=0

# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100

# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=pylint.extensions.no_self_use

# Pickle collected data for later comparisons.
persistent=yes

# Specify a configuration file.
#rcfile=

# When enabled, pylint would attempt to guess common misconfigurations and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes

# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no

# Run Python-version-dependent checks considering the baseline version
py-version=3.9

[MESSAGES CONTROL]

# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
confidence=

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=missing-docstring,
    fixme, # Warns about FIXME, TODO, etc. comments.
    too-few-public-methods, # Might be good to re-enable this later.
    too-many-instance-attributes,
    too-many-arguments,
    too-many-positional-arguments,
    duplicate-code,
    ungrouped-imports, # Leave this up to isort
    wrong-import-order, # Leave this up to isort
    line-too-long, # Leave this up to black
    exec-used,
    super-with-arguments, # temp-pylint-upgrade
    isinstance-second-argument-not-valid-type, # temp-pylint-upgrade
    raise-missing-from, # temp-pylint-upgrade
    unused-argument, # temp-pylint-upgrade
    redefined-builtin,
    cyclic-import,
    too-many-lines,

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifiers separated by comma (,) or put this option
# multiple times (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
# enable=c-extension-no-member

[REPORTS]

# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors, warning, statement, which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
#evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
#msg-template=

# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=text

# Tells whether to display a full report or only the messages.
#reports=no

# Activate the evaluation score.
score=yes

[REFACTORING]

# Maximum number of nested blocks for function / method body
max-nested-blocks=5

# Complete name of functions that never return. When checking for
# inconsistent-return-statements, if a never-returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit

[LOGGING]

# Format style used to check logging format string. `old` means using %
# formatting, while `new` is for `{}` formatting.
logging-format-style=old

# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging

[SPELLING]

# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4

# Spelling dictionary name. Available dictionaries: none. To make this work,
# install the python-enchant package.
spelling-dict=

# List of comma separated words that should not be checked.
spelling-ignore-words=

# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=

# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no

[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
      XXX,
      TODO

[TYPECHECK]

# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager,
                          _agnosticcontextmanager

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=zipkin_pb2.*

# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
#ignore-mixin-members=yes

# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
#ignore-none=yes

# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
#ignore-on-opaque-inference=yes

# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local

# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=

# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes

# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1

# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1

[VARIABLES]

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=

# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes

# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
          _cb

# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_

# Argument names that match this expression will be ignored. Default to name
# with leading underscore.
ignored-argument-names=_.*|^ignored_|^unused_|^kwargs|^args

# Tells whether we should check for unused import in __init__ files.
init-import=no

# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io

[FORMAT]

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=LF

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '

# Maximum number of characters on a single line.
max-line-length=79

# Maximum number of lines in a module.
max-module-lines=1000

# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no

[SIMILARITIES]

# Ignore comments when computing similarities.
ignore-comments=yes

# Ignore docstrings when computing similarities.
ignore-docstrings=yes

# Ignore imports when computing similarities.
ignore-imports=no

# Minimum lines number of a similarity.
min-similarity-lines=4

[BASIC]

# Naming style matching correct argument names.
argument-naming-style=snake_case

# Regular expression matching correct argument names. Overrides argument-
# naming-style.
#argument-rgx=

# Naming style matching correct attribute names.
attr-naming-style=snake_case

# Regular expression matching correct attribute names. Overrides attr-naming-
# style.
#attr-rgx=

# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
          bar,
          baz,
          toto,
          tutu,
          tata

# Naming style matching correct class attribute names.
class-attribute-naming-style=any

# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style.
#class-attribute-rgx=

# Naming style matching correct class names.
class-naming-style=PascalCase

# Regular expression matching correct class names. Overrides class-naming-
# style.
#class-rgx=

# Naming style matching correct constant names.
const-naming-style=any

# Regular expression matching correct constant names. Overrides const-naming-
# style.
#const-rgx=

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1

# Naming style matching correct function names.
function-naming-style=snake_case

# Regular expression matching correct function names. Overrides function-
# naming-style.
#function-rgx=

# Good variable names which should always be accepted, separated by a comma.
good-names=_,
           log,
           logger

# Include a hint for the correct naming format with invalid-name.
include-naming-hint=yes

# Naming style matching correct inline iteration names.
inlinevar-naming-style=any

# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style.
#inlinevar-rgx=

# Naming style matching correct method names.
method-naming-style=snake_case

# Regular expression matching correct method names. Overrides method-naming-
# style.
#method-rgx=

# Naming style matching correct module names.
module-naming-style=snake_case

# Regular expression matching correct module names. Overrides module-naming-
# style.
#module-rgx=

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty

# Naming style matching correct variable names.
variable-naming-style=snake_case

# Regular expression matching correct variable names. Overrides variable-
# naming-style.
variable-rgx=(([a-z_][a-z0-9_]{1,})|(_[a-z0-9_]*)|(__[a-z][a-z0-9_]+__))$

[IMPORTS]

# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no

# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=yes

# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=optparse,tkinter.tix

# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled).
ext-import-graph=

# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled).
import-graph=

# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled).
int-import-graph=

# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=six

# Force import order to recognize a module as part of a third party library.
known-third-party=enchant

[CLASSES]

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
                      __new__,
                      setUp

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
                  _fields,
                  _replace,
                  _source,
                  _make,
                  _Span

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=cls

[DESIGN]

# Maximum number of arguments for function / method.
max-args=5

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Maximum number of boolean expressions in an if statement.
max-bool-expr=5

# Maximum number of branches for function / method body.
max-branches=12

# Maximum number of locals for function / method body.
max-locals=15

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of return / yield for function / method body.
max-returns=6

# Maximum number of statements in function / method body.
max-statements=50

# Minimum number of public methods for a class (see R0903).
min-public-methods=2

[EXCEPTIONS]

# Exceptions that will emit a warning when being caught.
overgeneral-exceptions=builtins.Exception

python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/LICENSE:

    [Apache License, Version 2.0 -- the standard, unmodified license text, as
    published at http://www.apache.org/licenses/LICENSE-2.0.]

python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/README.rst:

OpenTelemetry Semantic Conventions
==================================

|pypi|

.. |pypi| image:: https://badge.fury.io/py/opentelemetry-semantic-conventions.svg
   :target: https://pypi.org/project/opentelemetry-semantic-conventions/

This library contains generated code for the semantic conventions defined by
the OpenTelemetry specification.

Installation
------------

::

    pip install opentelemetry-semantic-conventions

Code Generation
---------------

These files were generated automatically from code in semconv_.
To regenerate the code, run ``../scripts/semconv/generate.sh``.

To build against a new release or specific commit of
opentelemetry-specification_, update the ``SPEC_VERSION`` variable in
``../scripts/semconv/generate.sh``. Then run the script and commit the changes.

.. _opentelemetry-specification: https://github.com/open-telemetry/opentelemetry-specification
.. _semconv: https://github.com/open-telemetry/opentelemetry-python/tree/main/scripts/semconv

References
----------

* `OpenTelemetry Project <https://opentelemetry.io/>`_
* `OpenTelemetry Semantic Conventions Definitions <https://github.com/open-telemetry/semantic-conventions>`_
* `generate.sh script <https://github.com/open-telemetry/opentelemetry-python/blob/main/scripts/semconv/generate.sh>`_

python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/pyproject.toml:

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "opentelemetry-semantic-conventions"
dynamic = ["version"]
description = "OpenTelemetry Semantic Conventions"
readme = "README.rst"
license = "Apache-2.0"
requires-python = ">=3.9"
authors = [
    { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" },
]
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Framework :: OpenTelemetry",
    "Intended Audience :: Developers",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
]
dependencies = [
    "opentelemetry-api == 1.39.1",
    "typing-extensions >= 4.5.0",
]

[project.urls]
Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/opentelemetry-semantic-conventions"
Repository = "https://github.com/open-telemetry/opentelemetry-python"

[tool.hatch.version]
path = "src/opentelemetry/semconv/version/__init__.py"

[tool.hatch.build.targets.sdist]
include = [
    "/src",
    "/tests",
]

[tool.hatch.build.targets.wheel]
packages = ["src/opentelemetry"]

python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/__init__.py:

    (empty file)

python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/app_attributes.py:

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
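
# A minimal usage sketch (illustrative only, not part of the generated
# module): the constants below are plain attribute-key strings, passed
# wherever span, event, or resource attributes are accepted. The tracer setup
# here is an assumption for the example, not something this module provides.
#
#     from opentelemetry import trace
#     from opentelemetry.semconv._incubating.attributes.app_attributes import (
#         APP_SCREEN_ID,
#         APP_SCREEN_NAME,
#     )
#
#     tracer = trace.get_tracer(__name__)
#     with tracer.start_as_current_span("screen.render") as span:
#         span.set_attribute(APP_SCREEN_NAME, "CheckoutScreen")
#         span.set_attribute(APP_SCREEN_ID, "checkout")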
from typing import Final

APP_BUILD_ID: Final = "app.build_id"
"""
Unique identifier for a particular build or compilation of the application.
"""

APP_INSTALLATION_ID: Final = "app.installation.id"
"""
A unique identifier representing the installation of an application on a specific device.
Note: Its value SHOULD persist across launches of the same application installation, including through application upgrades.
It SHOULD change if the application is uninstalled or if all applications of the vendor are uninstalled.
Additionally, users might be able to reset this value (e.g. by clearing application data).
If an app is installed multiple times on the same device (e.g. in different accounts on Android), each `app.installation.id` SHOULD have a different value.
If multiple OpenTelemetry SDKs are used within the same application, they SHOULD use the same value for `app.installation.id`.
Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the `app.installation.id`.

For iOS, this value SHOULD be equal to the [vendor identifier](https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor).

For Android, examples of `app.installation.id` implementations include:

- [Firebase Installation ID](https://firebase.google.com/docs/projects/manage-installations).
- A globally unique UUID which is persisted across sessions in your application.
- [App set ID](https://developer.android.com/identity/app-set-id).
- [`Settings.getString(Settings.Secure.ANDROID_ID)`](https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID).

More information about Android identifier best practices can be found in the [Android user data IDs guide](https://developer.android.com/training/articles/user-data-ids).
"""

APP_JANK_FRAME_COUNT: Final = "app.jank.frame_count"
"""
A number of frame renders that experienced jank.
Note: Depending on platform limitations, the value provided MAY be an approximation.
"""

APP_JANK_PERIOD: Final = "app.jank.period"
"""
The time period, in seconds, for which this jank is being reported.
"""

APP_JANK_THRESHOLD: Final = "app.jank.threshold"
"""
The minimum rendering threshold for this jank, in seconds.
"""

APP_SCREEN_COORDINATE_X: Final = "app.screen.coordinate.x"
"""
The x (horizontal) coordinate of a screen coordinate, in screen pixels.
"""

APP_SCREEN_COORDINATE_Y: Final = "app.screen.coordinate.y"
"""
The y (vertical) component of a screen coordinate, in screen pixels.
"""

APP_SCREEN_ID: Final = "app.screen.id"
"""
An identifier that uniquely differentiates this screen from other screens in the same application.
Note: A screen represents only the part of the device display drawn by the app. It typically contains multiple widgets or UI components and is larger in scope than individual widgets. Multiple screens can coexist on the same display simultaneously (e.g., split view on tablets).
"""

APP_SCREEN_NAME: Final = "app.screen.name"
"""
The name of an application screen.
Note: A screen represents only the part of the device display drawn by the app. It typically contains multiple widgets or UI components and is larger in scope than individual widgets. Multiple screens can coexist on the same display simultaneously (e.g., split view on tablets).
"""

APP_WIDGET_ID: Final = "app.widget.id"
"""
An identifier that uniquely differentiates this widget from other widgets in the same application.
Note: A widget is an application component, typically an on-screen visual GUI element.
"""

APP_WIDGET_NAME: Final = "app.widget.name"
"""
The name of an application widget.
Note: A widget is an application component, typically an on-screen visual GUI element.
"""

python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes/artifact_attributes.py:

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Final

ARTIFACT_ATTESTATION_FILENAME: Final = "artifact.attestation.filename"
"""
The provenance filename of the built attestation which directly relates to the build artifact filename. This filename SHOULD accompany the artifact at publish time. See the [SLSA Relationship](https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations) specification for more information.
"""

ARTIFACT_ATTESTATION_HASH: Final = "artifact.attestation.hash"
"""
The full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), of the built attestation. Some envelopes in the [software attestation space](https://github.com/in-toto/attestation/tree/main/spec) also refer to this as the **digest**.
"""

ARTIFACT_ATTESTATION_ID: Final = "artifact.attestation.id"
"""
The id of the build [software attestation](https://slsa.dev/attestation-model).
"""

ARTIFACT_FILENAME: Final = "artifact.filename"
"""
The human readable file name of the artifact, typically generated during build and release processes. Often includes the package name and version in the file name.
Note: This file name can also act as the [Package Name](https://slsa.dev/spec/v1.0/terminology#package-model) in cases where the package ecosystem maps accordingly.
Additionally, the artifact [can be published](https://slsa.dev/spec/v1.0/terminology#software-supply-chain) for others, but that is not a guarantee.
"""

ARTIFACT_HASH: Final = "artifact.hash"
"""
The full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), often found in checksum.txt on a release of the artifact and used to verify package integrity.
Note: The specific algorithm used to create the cryptographic hash value is not defined. In situations where an artifact has multiple cryptographic hashes, it is up to the implementer to choose which hash value to set here; this should be the most secure hash algorithm that is suitable for the situation and consistent with the corresponding attestation. The implementer can then provide the other hash values through an additional set of attribute extensions as they deem necessary.
"""

ARTIFACT_PURL: Final = "artifact.purl"
"""
The [Package URL](https://github.com/package-url/purl-spec) of the [package artifact](https://slsa.dev/spec/v1.0/terminology#package-model) provides a standard way to identify and locate the packaged artifact.
"""

ARTIFACT_VERSION: Final = "artifact.version"
"""
The version of the artifact.
"""
""" aws_attributes.py000066400000000000000000000335001511654350100422220ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final AWS_BEDROCK_GUARDRAIL_ID: Final = "aws.bedrock.guardrail.id" """ The unique identifier of the AWS Bedrock Guardrail. A [guardrail](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html) helps safeguard and prevent unwanted behavior from model responses or user messages. """ AWS_BEDROCK_KNOWLEDGE_BASE_ID: Final = "aws.bedrock.knowledge_base.id" """ The unique identifier of the AWS Bedrock Knowledge base. A [knowledge base](https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html) is a bank of information that can be queried by models to generate more relevant responses and augment prompts. """ AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS: Final = ( "aws.dynamodb.attribute_definitions" ) """ The JSON-serialized value of each item in the `AttributeDefinitions` request field. """ AWS_DYNAMODB_ATTRIBUTES_TO_GET: Final = "aws.dynamodb.attributes_to_get" """ The value of the `AttributesToGet` request parameter. """ AWS_DYNAMODB_CONSISTENT_READ: Final = "aws.dynamodb.consistent_read" """ The value of the `ConsistentRead` request parameter. """ AWS_DYNAMODB_CONSUMED_CAPACITY: Final = "aws.dynamodb.consumed_capacity" """ The JSON-serialized value of each item in the `ConsumedCapacity` response field. """ AWS_DYNAMODB_COUNT: Final = "aws.dynamodb.count" """ The value of the `Count` response parameter. """ AWS_DYNAMODB_EXCLUSIVE_START_TABLE: Final = ( "aws.dynamodb.exclusive_start_table" ) """ The value of the `ExclusiveStartTableName` request parameter. """ AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES: Final = ( "aws.dynamodb.global_secondary_index_updates" ) """ The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` request field. """ AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES: Final = ( "aws.dynamodb.global_secondary_indexes" ) """ The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request field. """ AWS_DYNAMODB_INDEX_NAME: Final = "aws.dynamodb.index_name" """ The value of the `IndexName` request parameter. """ AWS_DYNAMODB_ITEM_COLLECTION_METRICS: Final = ( "aws.dynamodb.item_collection_metrics" ) """ The JSON-serialized value of the `ItemCollectionMetrics` response field. """ AWS_DYNAMODB_LIMIT: Final = "aws.dynamodb.limit" """ The value of the `Limit` request parameter. """ AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES: Final = ( "aws.dynamodb.local_secondary_indexes" ) """ The JSON-serialized value of each item of the `LocalSecondaryIndexes` request field. """ AWS_DYNAMODB_PROJECTION: Final = "aws.dynamodb.projection" """ The value of the `ProjectionExpression` request parameter. 
""" AWS_DYNAMODB_PROVISIONED_READ_CAPACITY: Final = ( "aws.dynamodb.provisioned_read_capacity" ) """ The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. """ AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY: Final = ( "aws.dynamodb.provisioned_write_capacity" ) """ The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. """ AWS_DYNAMODB_SCAN_FORWARD: Final = "aws.dynamodb.scan_forward" """ The value of the `ScanIndexForward` request parameter. """ AWS_DYNAMODB_SCANNED_COUNT: Final = "aws.dynamodb.scanned_count" """ The value of the `ScannedCount` response parameter. """ AWS_DYNAMODB_SEGMENT: Final = "aws.dynamodb.segment" """ The value of the `Segment` request parameter. """ AWS_DYNAMODB_SELECT: Final = "aws.dynamodb.select" """ The value of the `Select` request parameter. """ AWS_DYNAMODB_TABLE_COUNT: Final = "aws.dynamodb.table_count" """ The number of items in the `TableNames` response parameter. """ AWS_DYNAMODB_TABLE_NAMES: Final = "aws.dynamodb.table_names" """ The keys in the `RequestItems` object field. """ AWS_DYNAMODB_TOTAL_SEGMENTS: Final = "aws.dynamodb.total_segments" """ The value of the `TotalSegments` request parameter. """ AWS_ECS_CLUSTER_ARN: Final = "aws.ecs.cluster.arn" """ The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). """ AWS_ECS_CONTAINER_ARN: Final = "aws.ecs.container.arn" """ The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). """ AWS_ECS_LAUNCHTYPE: Final = "aws.ecs.launchtype" """ The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task. """ AWS_ECS_TASK_ARN: Final = "aws.ecs.task.arn" """ The ARN of a running [ECS task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). """ AWS_ECS_TASK_FAMILY: Final = "aws.ecs.task.family" """ The family name of the [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) used to create the ECS task. """ AWS_ECS_TASK_ID: Final = "aws.ecs.task.id" """ The ID of a running ECS task. The ID MUST be extracted from `task.arn`. """ AWS_ECS_TASK_REVISION: Final = "aws.ecs.task.revision" """ The revision for the task definition used to create the ECS task. """ AWS_EKS_CLUSTER_ARN: Final = "aws.eks.cluster.arn" """ The ARN of an EKS cluster. """ AWS_EXTENDED_REQUEST_ID: Final = "aws.extended_request_id" """ The AWS extended request ID as returned in the response header `x-amz-id-2`. """ AWS_KINESIS_STREAM_NAME: Final = "aws.kinesis.stream_name" """ The name of the AWS Kinesis [stream](https://docs.aws.amazon.com/streams/latest/dev/introduction.html) the request refers to. Corresponds to the `--stream-name` parameter of the Kinesis [describe-stream](https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html) operation. """ AWS_LAMBDA_INVOKED_ARN: Final = "aws.lambda.invoked_arn" """ The full invoked ARN as provided on the `Context` passed to the function (`Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` applicable). Note: This may be different from `cloud.resource_id` if an alias is involved. """ AWS_LAMBDA_RESOURCE_MAPPING_ID: Final = "aws.lambda.resource_mapping.id" """ The UUID of the [AWS Lambda EvenSource Mapping](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html). 
An event source is mapped to a lambda function. Its contents are read by Lambda and used to trigger a function.
This isn't available in the lambda execution context or the lambda runtime environment.
This is populated by the AWS SDK for each language when that UUID is present.
Some of these operations are Create/Delete/Get/List/Update EventSourceMapping.
"""

AWS_LOG_GROUP_ARNS: Final = "aws.log.group.arns"
"""
The Amazon Resource Name(s) (ARN) of the AWS log group(s).
Note: See the [log group ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
"""

AWS_LOG_GROUP_NAMES: Final = "aws.log.group.names"
"""
The name(s) of the AWS log group(s) an application is writing to.
Note: Multiple log groups must be supported for cases like multi-container applications, where a single application has sidecar containers, and each writes to its own log group.
"""

AWS_LOG_STREAM_ARNS: Final = "aws.log.stream.arns"
"""
The ARN(s) of the AWS log stream(s).
Note: See the [log stream ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
One log group can contain several log streams, so these ARNs necessarily identify both a log group and a log stream.
"""

AWS_LOG_STREAM_NAMES: Final = "aws.log.stream.names"
"""
The name(s) of the AWS log stream(s) an application is writing to.
"""

AWS_REQUEST_ID: Final = "aws.request_id"
"""
The AWS request ID as returned in the response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id`.
"""

AWS_S3_BUCKET: Final = "aws.s3.bucket"
"""
The S3 bucket name the request refers to. Corresponds to the `--bucket` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations.
Note: The `bucket` attribute is applicable to all S3 operations that reference a bucket, i.e. that require the bucket name as a mandatory parameter.
This applies to almost all S3 operations except `list-buckets`.
"""

AWS_S3_COPY_SOURCE: Final = "aws.s3.copy_source"
"""
The source object (in the form `bucket`/`key`) for the copy operation.
Note: The `copy_source` attribute applies to S3 copy operations and corresponds to the `--copy-source` parameter of the [copy-object operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
This applies in particular to the following operations:

- [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html).
"""

AWS_S3_DELETE: Final = "aws.s3.delete"
"""
The delete request container that specifies the objects to be deleted.
Note: The `delete` attribute is only applicable to the [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html) operation.
The `delete` attribute corresponds to the `--delete` parameter of the [delete-objects operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
"""

AWS_S3_KEY: Final = "aws.s3.key"
"""
The S3 object key the request refers to. Corresponds to the `--key` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations.
Note: The `key` attribute is applicable to all object-related S3 operations, i.e. that require the object key as a mandatory parameter.
This applies in particular to the following operations:

- [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html).
"""

AWS_S3_PART_NUMBER: Final = "aws.s3.part_number"
"""
The part number of the part being uploaded in a multipart-upload operation. This is a positive integer between 1 and 10,000.
Note: The `part_number` attribute is only applicable to the [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) and [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) operations.
The `part_number` attribute corresponds to the `--part-number` parameter of the [upload-part operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
"""

AWS_S3_UPLOAD_ID: Final = "aws.s3.upload_id"
"""
Upload ID that identifies the multipart upload.
Note: The `upload_id` attribute applies to S3 multipart-upload operations and corresponds to the `--upload-id` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) multipart operations.
This applies in particular to the following operations:

- [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html).
"""

AWS_SECRETSMANAGER_SECRET_ARN: Final = "aws.secretsmanager.secret.arn"
"""
The ARN of the Secret stored in the Secrets Manager.
"""

AWS_SNS_TOPIC_ARN: Final = "aws.sns.topic.arn"
"""
The ARN of the AWS SNS Topic. An Amazon SNS [topic](https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html) is a logical access point that acts as a communication channel.
"""

AWS_SQS_QUEUE_URL: Final = "aws.sqs.queue.url"
"""
The URL of the AWS SQS Queue. It's a unique identifier for a queue in Amazon Simple Queue Service (SQS) and is used to access the queue and perform actions on it.
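A minimal usage sketch (illustrative, not part of the generated definitions), assuming an already-configured OpenTelemetry SDK; the tracer name, span name, and queue URL are hypothetical::

    from opentelemetry import trace

    tracer = trace.get_tracer("sqs-instrumentation-example")
    with tracer.start_as_current_span("sqs.send_message") as span:
        span.set_attribute(
            AWS_SQS_QUEUE_URL,
            "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue",
        )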
""" AWS_STEP_FUNCTIONS_ACTIVITY_ARN: Final = "aws.step_functions.activity.arn" """ The ARN of the AWS Step Functions Activity. """ AWS_STEP_FUNCTIONS_STATE_MACHINE_ARN: Final = ( "aws.step_functions.state_machine.arn" ) """ The ARN of the AWS Step Functions State Machine. """ class AwsEcsLaunchtypeValues(Enum): EC2 = "ec2" """Amazon EC2.""" FARGATE = "fargate" """Amazon Fargate.""" az_attributes.py000066400000000000000000000015011511654350100420360ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final AZ_NAMESPACE: Final = "az.namespace" """ Deprecated: Replaced by `azure.resource_provider.namespace`. """ AZ_SERVICE_REQUEST_ID: Final = "az.service_request_id" """ Deprecated: Replaced by `azure.service.request.id`. """ azure_attributes.py000066400000000000000000000054521511654350100425630ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final AZURE_CLIENT_ID: Final = "azure.client.id" """ The unique identifier of the client instance. """ AZURE_COSMOSDB_CONNECTION_MODE: Final = "azure.cosmosdb.connection.mode" """ Cosmos client connection mode. """ AZURE_COSMOSDB_CONSISTENCY_LEVEL: Final = "azure.cosmosdb.consistency.level" """ Account or request [consistency level](https://learn.microsoft.com/azure/cosmos-db/consistency-levels). """ AZURE_COSMOSDB_OPERATION_CONTACTED_REGIONS: Final = ( "azure.cosmosdb.operation.contacted_regions" ) """ List of regions contacted during operation in the order that they were contacted. If there is more than one region listed, it indicates that the operation was performed on multiple regions i.e. cross-regional call. Note: Region name matches the format of `displayName` in [Azure Location API](https://learn.microsoft.com/rest/api/resources/subscriptions/list-locations). """ AZURE_COSMOSDB_OPERATION_REQUEST_CHARGE: Final = ( "azure.cosmosdb.operation.request_charge" ) """ The number of request units consumed by the operation. """ AZURE_COSMOSDB_REQUEST_BODY_SIZE: Final = "azure.cosmosdb.request.body.size" """ Request payload size in bytes. """ AZURE_COSMOSDB_RESPONSE_SUB_STATUS_CODE: Final = ( "azure.cosmosdb.response.sub_status_code" ) """ Cosmos DB sub status code. 
""" AZURE_RESOURCE_PROVIDER_NAMESPACE: Final = "azure.resource_provider.namespace" """ [Azure Resource Provider Namespace](https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers) as recognized by the client. """ AZURE_SERVICE_REQUEST_ID: Final = "azure.service.request.id" """ The unique identifier of the service request. It's generated by the Azure service and returned with the response. """ class AzureCosmosdbConnectionModeValues(Enum): GATEWAY = "gateway" """Gateway (HTTP) connection.""" DIRECT = "direct" """Direct connection.""" class AzureCosmosdbConsistencyLevelValues(Enum): STRONG = "Strong" """Strong.""" BOUNDED_STALENESS = "BoundedStaleness" """Bounded Staleness.""" SESSION = "Session" """Session.""" EVENTUAL = "Eventual" """Eventual.""" CONSISTENT_PREFIX = "ConsistentPrefix" """Consistent Prefix.""" browser_attributes.py000066400000000000000000000042601511654350100431140ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final BROWSER_BRANDS: Final = "browser.brands" """ Array of brand name and version separated by a space. Note: This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.brands`). """ BROWSER_LANGUAGE: Final = "browser.language" """ Preferred language of the user using the browser. Note: This value is intended to be taken from the Navigator API `navigator.language`. """ BROWSER_MOBILE: Final = "browser.mobile" """ A boolean that is true if the browser is running on a mobile device. Note: This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be left unset. """ BROWSER_PLATFORM: Final = "browser.platform" """ The platform on which the browser is running. Note: This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.platform`). If unavailable, the legacy `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD be left unset in order for the values to be consistent. The list of possible values is defined in the [W3C User-Agent Client Hints specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). Note that some (but not all) of these values can overlap with values in the [`os.type` and `os.name` attributes](./os.md). However, for consistency, the values in the `browser.platform` attribute should capture the exact value that the user agent provides. 
""" cassandra_attributes.py000066400000000000000000000040521511654350100433670ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final CASSANDRA_CONSISTENCY_LEVEL: Final = "cassandra.consistency.level" """ The consistency level of the query. Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). """ CASSANDRA_COORDINATOR_DC: Final = "cassandra.coordinator.dc" """ The data center of the coordinating node for a query. """ CASSANDRA_COORDINATOR_ID: Final = "cassandra.coordinator.id" """ The ID of the coordinating node for a query. """ CASSANDRA_PAGE_SIZE: Final = "cassandra.page.size" """ The fetch size used for paging, i.e. how many rows will be returned at once. """ CASSANDRA_QUERY_IDEMPOTENT: Final = "cassandra.query.idempotent" """ Whether or not the query is idempotent. """ CASSANDRA_SPECULATIVE_EXECUTION_COUNT: Final = ( "cassandra.speculative_execution.count" ) """ The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively. """ class CassandraConsistencyLevelValues(Enum): ALL = "all" """All.""" EACH_QUORUM = "each_quorum" """Each Quorum.""" QUORUM = "quorum" """Quorum.""" LOCAL_QUORUM = "local_quorum" """Local Quorum.""" ONE = "one" """One.""" TWO = "two" """Two.""" THREE = "three" """Three.""" LOCAL_ONE = "local_one" """Local One.""" ANY = "any" """Any.""" SERIAL = "serial" """Serial.""" LOCAL_SERIAL = "local_serial" """Local Serial.""" cicd_attributes.py000066400000000000000000000131621511654350100423340ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final CICD_PIPELINE_ACTION_NAME: Final = "cicd.pipeline.action.name" """ The kind of action a pipeline run is performing. """ CICD_PIPELINE_NAME: Final = "cicd.pipeline.name" """ The human readable name of the pipeline within a CI/CD system. """ CICD_PIPELINE_RESULT: Final = "cicd.pipeline.result" """ The result of a pipeline run. """ CICD_PIPELINE_RUN_ID: Final = "cicd.pipeline.run.id" """ The unique identifier of a pipeline run within a CI/CD system. 
""" CICD_PIPELINE_RUN_STATE: Final = "cicd.pipeline.run.state" """ The pipeline run goes through these states during its lifecycle. """ CICD_PIPELINE_RUN_URL_FULL: Final = "cicd.pipeline.run.url.full" """ The [URL](https://wikipedia.org/wiki/URL) of the pipeline run, providing the complete address in order to locate and identify the pipeline run. """ CICD_PIPELINE_TASK_NAME: Final = "cicd.pipeline.task.name" """ The human readable name of a task within a pipeline. Task here most closely aligns with a [computing process](https://wikipedia.org/wiki/Pipeline_(computing)) in a pipeline. Other terms for tasks include commands, steps, and procedures. """ CICD_PIPELINE_TASK_RUN_ID: Final = "cicd.pipeline.task.run.id" """ The unique identifier of a task run within a pipeline. """ CICD_PIPELINE_TASK_RUN_RESULT: Final = "cicd.pipeline.task.run.result" """ The result of a task run. """ CICD_PIPELINE_TASK_RUN_URL_FULL: Final = "cicd.pipeline.task.run.url.full" """ The [URL](https://wikipedia.org/wiki/URL) of the pipeline task run, providing the complete address in order to locate and identify the pipeline task run. """ CICD_PIPELINE_TASK_TYPE: Final = "cicd.pipeline.task.type" """ The type of the task within a pipeline. """ CICD_SYSTEM_COMPONENT: Final = "cicd.system.component" """ The name of a component of the CICD system. """ CICD_WORKER_ID: Final = "cicd.worker.id" """ The unique identifier of a worker within a CICD system. """ CICD_WORKER_NAME: Final = "cicd.worker.name" """ The name of a worker within a CICD system. """ CICD_WORKER_STATE: Final = "cicd.worker.state" """ The state of a CICD worker / agent. """ CICD_WORKER_URL_FULL: Final = "cicd.worker.url.full" """ The [URL](https://wikipedia.org/wiki/URL) of the worker, providing the complete address in order to locate and identify the worker. """ class CicdPipelineActionNameValues(Enum): BUILD = "BUILD" """The pipeline run is executing a build.""" RUN = "RUN" """The pipeline run is executing.""" SYNC = "SYNC" """The pipeline run is executing a sync.""" class CicdPipelineResultValues(Enum): SUCCESS = "success" """The pipeline run finished successfully.""" FAILURE = "failure" """The pipeline run did not finish successfully, eg. due to a compile error or a failing test. Such failures are usually detected by non-zero exit codes of the tools executed in the pipeline run.""" ERROR = "error" """The pipeline run failed due to an error in the CICD system, eg. due to the worker being killed.""" TIMEOUT = "timeout" """A timeout caused the pipeline run to be interrupted.""" CANCELLATION = "cancellation" """The pipeline run was cancelled, eg. by a user manually cancelling the pipeline run.""" SKIP = "skip" """The pipeline run was skipped, eg. due to a precondition not being met.""" class CicdPipelineRunStateValues(Enum): PENDING = "pending" """The run pending state spans from the event triggering the pipeline run until the execution of the run starts (eg. time spent in a queue, provisioning agents, creating run resources).""" EXECUTING = "executing" """The executing state spans the execution of any run tasks (eg. build, test).""" FINALIZING = "finalizing" """The finalizing state spans from when the run has finished executing (eg. cleanup of run resources).""" class CicdPipelineTaskRunResultValues(Enum): SUCCESS = "success" """The task run finished successfully.""" FAILURE = "failure" """The task run did not finish successfully, eg. due to a compile error or a failing test. 
Such failures are usually detected by non-zero exit codes of the tools executed in the task run.""" ERROR = "error" """The task run failed due to an error in the CICD system, eg. due to the worker being killed.""" TIMEOUT = "timeout" """A timeout caused the task run to be interrupted.""" CANCELLATION = "cancellation" """The task run was cancelled, eg. by a user manually cancelling the task run.""" SKIP = "skip" """The task run was skipped, eg. due to a precondition not being met.""" class CicdPipelineTaskTypeValues(Enum): BUILD = "build" """build.""" TEST = "test" """test.""" DEPLOY = "deploy" """deploy.""" class CicdWorkerStateValues(Enum): AVAILABLE = "available" """The worker is not performing work for the CICD system. It is available to the CICD system to perform work on (online / idle).""" BUSY = "busy" """The worker is performing work for the CICD system.""" OFFLINE = "offline" """The worker is not available to the CICD system (disconnected / down).""" client_attributes.py000066400000000000000000000016271511654350100427130ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final CLIENT_ADDRESS: Final = "client.address" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.client_attributes.CLIENT_ADDRESS`. """ CLIENT_PORT: Final = "client.port" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.client_attributes.CLIENT_PORT`. """ cloud_attributes.py000066400000000000000000000151711511654350100425420ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final CLOUD_ACCOUNT_ID: Final = "cloud.account.id" """ The cloud account ID the resource is assigned to. """ CLOUD_AVAILABILITY_ZONE: Final = "cloud.availability_zone" """ Cloud regions often have multiple, isolated locations known as zones to increase availability. Availability zone represents the zone where the resource is running. Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. """ CLOUD_PLATFORM: Final = "cloud.platform" """ The cloud platform in use. Note: The prefix of the service SHOULD match the one specified in `cloud.provider`. 
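A minimal usage sketch (illustrative, not part of the generated definitions), assuming the `opentelemetry-sdk` package is available; it uses the enum values defined below so that `cloud.platform` stays consistent with `cloud.provider`::

    from opentelemetry.sdk.resources import Resource

    resource = Resource.create(
        {
            CLOUD_PROVIDER: CloudProviderValues.AWS.value,
            CLOUD_PLATFORM: CloudPlatformValues.AWS_LAMBDA.value,
        }
    )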
""" CLOUD_PROVIDER: Final = "cloud.provider" """ Name of the cloud provider. """ CLOUD_REGION: Final = "cloud.region" """ The geographical region within a cloud provider. When associated with a resource, this attribute specifies the region where the resource operates. When calling services or APIs deployed on a cloud, this attribute identifies the region where the called destination is deployed. Note: Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/global-infrastructure/geographies/), [Google Cloud regions](https://cloud.google.com/about/locations), or [Tencent Cloud regions](https://www.tencentcloud.com/document/product/213/6091). """ CLOUD_RESOURCE_ID: Final = "cloud.resource_id" """ Cloud provider-specific native identifier of the monitored cloud resource (e.g. an [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) on AWS, a [fully qualified resource ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on Azure, a [full resource name](https://google.aip.dev/122#full-resource-names) on GCP). Note: On some cloud providers, it may not be possible to determine the full ID at startup, so it may be necessary to set `cloud.resource_id` as a span attribute instead. The exact value to use for `cloud.resource_id` depends on the cloud provider. The following well-known definitions MUST be used if you set this attribute and they apply: - **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). Take care not to use the "invoked ARN" directly but replace any [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) with the resolved function version, as the same runtime instance may be invocable with multiple different aliases. - **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) - **Azure:** The [Fully Qualified Resource ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) of the invoked function, *not* the function app, having the form `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share a TracerProvider. 
""" class CloudPlatformValues(Enum): ALIBABA_CLOUD_ECS = "alibaba_cloud_ecs" """Alibaba Cloud Elastic Compute Service.""" ALIBABA_CLOUD_FC = "alibaba_cloud_fc" """Alibaba Cloud Function Compute.""" ALIBABA_CLOUD_OPENSHIFT = "alibaba_cloud_openshift" """Red Hat OpenShift on Alibaba Cloud.""" AWS_EC2 = "aws_ec2" """AWS Elastic Compute Cloud.""" AWS_ECS = "aws_ecs" """AWS Elastic Container Service.""" AWS_EKS = "aws_eks" """AWS Elastic Kubernetes Service.""" AWS_LAMBDA = "aws_lambda" """AWS Lambda.""" AWS_ELASTIC_BEANSTALK = "aws_elastic_beanstalk" """AWS Elastic Beanstalk.""" AWS_APP_RUNNER = "aws_app_runner" """AWS App Runner.""" AWS_OPENSHIFT = "aws_openshift" """Red Hat OpenShift on AWS (ROSA).""" AZURE_VM = "azure.vm" """Azure Virtual Machines.""" AZURE_CONTAINER_APPS = "azure.container_apps" """Azure Container Apps.""" AZURE_CONTAINER_INSTANCES = "azure.container_instances" """Azure Container Instances.""" AZURE_AKS = "azure.aks" """Azure Kubernetes Service.""" AZURE_FUNCTIONS = "azure.functions" """Azure Functions.""" AZURE_APP_SERVICE = "azure.app_service" """Azure App Service.""" AZURE_OPENSHIFT = "azure.openshift" """Azure Red Hat OpenShift.""" GCP_BARE_METAL_SOLUTION = "gcp_bare_metal_solution" """Google Bare Metal Solution (BMS).""" GCP_COMPUTE_ENGINE = "gcp_compute_engine" """Google Cloud Compute Engine (GCE).""" GCP_CLOUD_RUN = "gcp_cloud_run" """Google Cloud Run.""" GCP_KUBERNETES_ENGINE = "gcp_kubernetes_engine" """Google Cloud Kubernetes Engine (GKE).""" GCP_CLOUD_FUNCTIONS = "gcp_cloud_functions" """Google Cloud Functions (GCF).""" GCP_APP_ENGINE = "gcp_app_engine" """Google Cloud App Engine (GAE).""" GCP_OPENSHIFT = "gcp_openshift" """Red Hat OpenShift on Google Cloud.""" IBM_CLOUD_OPENSHIFT = "ibm_cloud_openshift" """Red Hat OpenShift on IBM Cloud.""" ORACLE_CLOUD_COMPUTE = "oracle_cloud_compute" """Compute on Oracle Cloud Infrastructure (OCI).""" ORACLE_CLOUD_OKE = "oracle_cloud_oke" """Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI).""" TENCENT_CLOUD_CVM = "tencent_cloud_cvm" """Tencent Cloud Cloud Virtual Machine (CVM).""" TENCENT_CLOUD_EKS = "tencent_cloud_eks" """Tencent Cloud Elastic Kubernetes Service (EKS).""" TENCENT_CLOUD_SCF = "tencent_cloud_scf" """Tencent Cloud Serverless Cloud Function (SCF).""" class CloudProviderValues(Enum): ALIBABA_CLOUD = "alibaba_cloud" """Alibaba Cloud.""" AWS = "aws" """Amazon Web Services.""" AZURE = "azure" """Microsoft Azure.""" GCP = "gcp" """Google Cloud Platform.""" HEROKU = "heroku" """Heroku Platform as a Service.""" IBM_CLOUD = "ibm_cloud" """IBM Cloud.""" ORACLE_CLOUD = "oracle_cloud" """Oracle Cloud Infrastructure (OCI).""" TENCENT_CLOUD = "tencent_cloud" """Tencent Cloud.""" cloudevents_attributes.py000066400000000000000000000032611511654350100437640ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Final CLOUDEVENTS_EVENT_ID: Final = "cloudevents.event_id" """ The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) uniquely identifies the event. """ CLOUDEVENTS_EVENT_SOURCE: Final = "cloudevents.event_source" """ The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) identifies the context in which an event happened. """ CLOUDEVENTS_EVENT_SPEC_VERSION: Final = "cloudevents.event_spec_version" """ The [version of the CloudEvents specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. """ CLOUDEVENTS_EVENT_SUBJECT: Final = "cloudevents.event_subject" """ The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) of the event in the context of the event producer (identified by source). """ CLOUDEVENTS_EVENT_TYPE: Final = "cloudevents.event_type" """ The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) contains a value describing the type of event related to the originating occurrence. """ cloudfoundry_attributes.py000066400000000000000000000111561511654350100441500ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final CLOUDFOUNDRY_APP_ID: Final = "cloudfoundry.app.id" """ The guid of the application. Note: Application instrumentation should use the value from environment variable `VCAP_APPLICATION.application_id`. This is the same value as reported by `cf app --guid`. """ CLOUDFOUNDRY_APP_INSTANCE_ID: Final = "cloudfoundry.app.instance.id" """ The index of the application instance. 0 when just one instance is active. Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope](https://github.com/cloudfoundry/loggregator-api#v2-envelope). It is used for logs and metrics emitted by CloudFoundry. It is supposed to contain the application instance index for applications deployed on the runtime. Application instrumentation should use the value from environment variable `CF_INSTANCE_INDEX`. """ CLOUDFOUNDRY_APP_NAME: Final = "cloudfoundry.app.name" """ The name of the application. Note: Application instrumentation should use the value from environment variable `VCAP_APPLICATION.application_name`. This is the same value as reported by `cf apps`. """ CLOUDFOUNDRY_ORG_ID: Final = "cloudfoundry.org.id" """ The guid of the CloudFoundry org the application is running in. Note: Application instrumentation should use the value from environment variable `VCAP_APPLICATION.org_id`. This is the same value as reported by `cf org --guid`. """ CLOUDFOUNDRY_ORG_NAME: Final = "cloudfoundry.org.name" """ The name of the CloudFoundry organization the app is running in. Note: Application instrumentation should use the value from environment variable `VCAP_APPLICATION.org_name`. 
This is the same value as reported by `cf orgs`. """ CLOUDFOUNDRY_PROCESS_ID: Final = "cloudfoundry.process.id" """ The UID identifying the process. Note: Application instrumentation should use the value from environment variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to `VCAP_APPLICATION.app_id` for applications deployed to the runtime. For system components, this could be the actual PID. """ CLOUDFOUNDRY_PROCESS_TYPE: Final = "cloudfoundry.process.type" """ The type of process. Note: CloudFoundry applications can consist of multiple jobs. Usually the main process will be of type `web`. There can be additional background tasks or side-cars with different process types. """ CLOUDFOUNDRY_SPACE_ID: Final = "cloudfoundry.space.id" """ The guid of the CloudFoundry space the application is running in. Note: Application instrumentation should use the value from environment variable `VCAP_APPLICATION.space_id`. This is the same value as reported by `cf space --guid`. """ CLOUDFOUNDRY_SPACE_NAME: Final = "cloudfoundry.space.name" """ The name of the CloudFoundry space the application is running in. Note: Application instrumentation should use the value from environment variable `VCAP_APPLICATION.space_name`. This is the same value as reported by `cf spaces`. """ CLOUDFOUNDRY_SYSTEM_ID: Final = "cloudfoundry.system.id" """ A guid or another name describing the event source. Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope](https://github.com/cloudfoundry/loggregator-api#v2-envelope). It is used for logs and metrics emitted by CloudFoundry. It is supposed to contain the component name, e.g. "gorouter", for CloudFoundry components. When system components are instrumented, values from the [Bosh spec](https://bosh.io/docs/jobs/#properties-spec) should be used. The `system.id` should be set to `spec.deployment/spec.name`. """ CLOUDFOUNDRY_SYSTEM_INSTANCE_ID: Final = "cloudfoundry.system.instance.id" """ A guid describing the concrete instance of the event source. Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope](https://github.com/cloudfoundry/loggregator-api#v2-envelope). It is used for logs and metrics emitted by CloudFoundry. It is supposed to contain the vm id for CloudFoundry components. When system components are instrumented, values from the [Bosh spec](https://bosh.io/docs/jobs/#properties-spec) should be used. The `system.instance.id` should be set to `spec.id`. """ code_attributes.py000066400000000000000000000037341511654350100423500ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final CODE_COLUMN: Final = "code.column" """ Deprecated: Replaced by `code.column.number`. """ CODE_COLUMN_NUMBER: Final = "code.column.number" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.code_attributes.CODE_COLUMN_NUMBER`. 
""" CODE_FILE_PATH: Final = "code.file.path" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.code_attributes.CODE_FILE_PATH`. """ CODE_FILEPATH: Final = "code.filepath" """ Deprecated: Replaced by `code.file.path`. """ CODE_FUNCTION: Final = "code.function" """ Deprecated: Value should be included in `code.function.name` which is expected to be a fully-qualified name. """ CODE_FUNCTION_NAME: Final = "code.function.name" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.code_attributes.CODE_FUNCTION_NAME`. """ CODE_LINE_NUMBER: Final = "code.line.number" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.code_attributes.CODE_LINE_NUMBER`. """ CODE_LINENO: Final = "code.lineno" """ Deprecated: Replaced by `code.line.number`. """ CODE_NAMESPACE: Final = "code.namespace" """ Deprecated: Value should be included in `code.function.name` which is expected to be a fully-qualified name. """ CODE_STACKTRACE: Final = "code.stacktrace" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.code_attributes.CODE_STACKTRACE`. """ container_attributes.py000066400000000000000000000126151511654350100434160ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated CONTAINER_COMMAND: Final = "container.command" """ The command used to run the container (i.e. the command name). Note: If using embedded credentials or sensitive data, it is recommended to remove them to prevent potential leakage. """ CONTAINER_COMMAND_ARGS: Final = "container.command_args" """ All the command arguments (including the command/executable itself) run by the container. """ CONTAINER_COMMAND_LINE: Final = "container.command_line" """ The full command run by the container as a single string representing the full command. """ CONTAINER_CPU_STATE: Final = "container.cpu.state" """ Deprecated: Replaced by `cpu.mode`. """ CONTAINER_CSI_PLUGIN_NAME: Final = "container.csi.plugin.name" """ The name of the CSI ([Container Storage Interface](https://github.com/container-storage-interface/spec)) plugin used by the volume. Note: This can sometimes be referred to as a "driver" in CSI implementations. This should represent the `name` field of the GetPluginInfo RPC. """ CONTAINER_CSI_VOLUME_ID: Final = "container.csi.volume.id" """ The unique volume ID returned by the CSI ([Container Storage Interface](https://github.com/container-storage-interface/spec)) plugin. Note: This can sometimes be referred to as a "volume handle" in CSI implementations. This should represent the `Volume.volume_id` field in CSI spec. """ CONTAINER_ID: Final = "container.id" """ Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/containers/run/#container-identification). 
The UUID might be abbreviated.
"""

CONTAINER_IMAGE_ID: Final = "container.image.id"
"""
Runtime specific image identifier. Usually a hash algorithm followed by a UUID.
Note: Docker defines a sha256 of the image id; `container.image.id` corresponds to the `Image` field from the Docker container inspect [API](https://docs.docker.com/reference/api/engine/version/v1.43/#tag/Container/operation/ContainerInspect) endpoint. K8s defines a link to the container registry repository with digest `"imageID": "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. The ID is assigned by the container runtime and can vary in different environments. Consider using `oci.manifest.digest` if it is important to identify the same image in different environments/runtimes.
"""

CONTAINER_IMAGE_NAME: Final = "container.image.name"
"""
Name of the image the container was built on.
"""

CONTAINER_IMAGE_REPO_DIGESTS: Final = "container.image.repo_digests"
"""
Repo digests of the container image as provided by the container runtime.
Note: [Docker](https://docs.docker.com/reference/api/engine/version/v1.43/#tag/Image/operation/ImageInspect) and [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) report those under the `RepoDigests` field.
"""

CONTAINER_IMAGE_TAGS: Final = "container.image.tags"
"""
Container image tags. An example can be found in [Docker Image Inspect](https://docs.docker.com/reference/api/engine/version/v1.43/#tag/Image/operation/ImageInspect). Should be only the `<tag>` section of the full name for example from `registry.example.com/my-org/my-image:<tag>`.
"""

CONTAINER_LABEL_TEMPLATE: Final = "container.label"
"""
Container labels, `<key>` being the label name, the value being the label value.
Note: For example, a docker container label `app` with value `nginx` SHOULD be recorded as the `container.label.app` attribute with value `"nginx"`.
"""

CONTAINER_LABELS_TEMPLATE: Final = "container.labels"
"""
Deprecated: Replaced by `container.label`.
"""

CONTAINER_NAME: Final = "container.name"
"""
Container name used by container runtime.
"""

CONTAINER_RUNTIME: Final = "container.runtime"
"""
Deprecated: Replaced by `container.runtime.name`.
"""

CONTAINER_RUNTIME_DESCRIPTION: Final = "container.runtime.description"
"""
A description about the runtime which could include, for example details about the CRI/API version being used or other customisations.
"""

CONTAINER_RUNTIME_NAME: Final = "container.runtime.name"
"""
The container runtime managing this container.
"""

CONTAINER_RUNTIME_VERSION: Final = "container.runtime.version"
"""
The version of the runtime of this process, as returned by the runtime without modification.
"""


@deprecated(
    "The attribute container.cpu.state is deprecated - Replaced by `cpu.mode`"
)
class ContainerCpuStateValues(Enum):
    USER = "user"
    """When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)."""
    SYSTEM = "system"
    """When CPU is used by the system (host OS)."""
    KERNEL = "kernel"
    """When tasks of the cgroup are in kernel mode (Linux).
When all container processes are in kernel mode (Windows).""" cpu_attributes.py000066400000000000000000000021401511654350100422130ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final CPU_LOGICAL_NUMBER: Final = "cpu.logical_number" """ The logical CPU number [0..n-1]. """ CPU_MODE: Final = "cpu.mode" """ The mode of the CPU. """ class CpuModeValues(Enum): USER = "user" """User.""" SYSTEM = "system" """System.""" NICE = "nice" """Nice.""" IDLE = "idle" """Idle.""" IOWAIT = "iowait" """IO Wait.""" INTERRUPT = "interrupt" """Interrupt.""" STEAL = "steal" """Steal.""" KERNEL = "kernel" """Kernel.""" cpython_attributes.py000066400000000000000000000016361511654350100431210ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final CPYTHON_GC_GENERATION: Final = "cpython.gc.generation" """ Value of the garbage collector collection generation. """ class CPythonGCGenerationValues(Enum): GENERATION_0 = 0 """Generation 0.""" GENERATION_1 = 1 """Generation 1.""" GENERATION_2 = 2 """Generation 2.""" db_attributes.py000066400000000000000000000430541511654350100420220ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated DB_CASSANDRA_CONSISTENCY_LEVEL: Final = "db.cassandra.consistency_level" """ Deprecated: Replaced by `cassandra.consistency.level`. 
""" DB_CASSANDRA_COORDINATOR_DC: Final = "db.cassandra.coordinator.dc" """ Deprecated: Replaced by `cassandra.coordinator.dc`. """ DB_CASSANDRA_COORDINATOR_ID: Final = "db.cassandra.coordinator.id" """ Deprecated: Replaced by `cassandra.coordinator.id`. """ DB_CASSANDRA_IDEMPOTENCE: Final = "db.cassandra.idempotence" """ Deprecated: Replaced by `cassandra.query.idempotent`. """ DB_CASSANDRA_PAGE_SIZE: Final = "db.cassandra.page_size" """ Deprecated: Replaced by `cassandra.page.size`. """ DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT: Final = ( "db.cassandra.speculative_execution_count" ) """ Deprecated: Replaced by `cassandra.speculative_execution.count`. """ DB_CASSANDRA_TABLE: Final = "db.cassandra.table" """ Deprecated: Replaced by `db.collection.name`. """ DB_CLIENT_CONNECTION_POOL_NAME: Final = "db.client.connection.pool.name" """ The name of the connection pool; unique within the instrumented application. In case the connection pool implementation doesn't provide a name, instrumentation SHOULD use a combination of parameters that would make the name unique, for example, combining attributes `server.address`, `server.port`, and `db.namespace`, formatted as `server.address:server.port/db.namespace`. Instrumentations that generate connection pool name following different patterns SHOULD document it. """ DB_CLIENT_CONNECTION_STATE: Final = "db.client.connection.state" """ The state of a connection in the pool. """ DB_CLIENT_CONNECTIONS_POOL_NAME: Final = "db.client.connections.pool.name" """ Deprecated: Replaced by `db.client.connection.pool.name`. """ DB_CLIENT_CONNECTIONS_STATE: Final = "db.client.connections.state" """ Deprecated: Replaced by `db.client.connection.state`. """ DB_COLLECTION_NAME: Final = "db.collection.name" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DB_COLLECTION_NAME`. """ DB_CONNECTION_STRING: Final = "db.connection_string" """ Deprecated: Replaced by `server.address` and `server.port`. """ DB_COSMOSDB_CLIENT_ID: Final = "db.cosmosdb.client_id" """ Deprecated: Replaced by `azure.client.id`. """ DB_COSMOSDB_CONNECTION_MODE: Final = "db.cosmosdb.connection_mode" """ Deprecated: Replaced by `azure.cosmosdb.connection.mode`. """ DB_COSMOSDB_CONSISTENCY_LEVEL: Final = "db.cosmosdb.consistency_level" """ Deprecated: Replaced by `azure.cosmosdb.consistency.level`. """ DB_COSMOSDB_CONTAINER: Final = "db.cosmosdb.container" """ Deprecated: Replaced by `db.collection.name`. """ DB_COSMOSDB_OPERATION_TYPE: Final = "db.cosmosdb.operation_type" """ Deprecated: Removed, no replacement at this time. """ DB_COSMOSDB_REGIONS_CONTACTED: Final = "db.cosmosdb.regions_contacted" """ Deprecated: Replaced by `azure.cosmosdb.operation.contacted_regions`. """ DB_COSMOSDB_REQUEST_CHARGE: Final = "db.cosmosdb.request_charge" """ Deprecated: Replaced by `azure.cosmosdb.operation.request_charge`. """ DB_COSMOSDB_REQUEST_CONTENT_LENGTH: Final = ( "db.cosmosdb.request_content_length" ) """ Deprecated: Replaced by `azure.cosmosdb.request.body.size`. """ DB_COSMOSDB_STATUS_CODE: Final = "db.cosmosdb.status_code" """ Deprecated: Use `db.response.status_code` instead. """ DB_COSMOSDB_SUB_STATUS_CODE: Final = "db.cosmosdb.sub_status_code" """ Deprecated: Replaced by `azure.cosmosdb.response.sub_status_code`. """ DB_ELASTICSEARCH_CLUSTER_NAME: Final = "db.elasticsearch.cluster.name" """ Deprecated: Replaced by `db.namespace`. """ DB_ELASTICSEARCH_NODE_NAME: Final = "db.elasticsearch.node.name" """ Deprecated: Replaced by `elasticsearch.node.name`. 
""" DB_ELASTICSEARCH_PATH_PARTS_TEMPLATE: Final = "db.elasticsearch.path_parts" """ Deprecated: Replaced by `db.operation.parameter`. """ DB_INSTANCE_ID: Final = "db.instance.id" """ Deprecated: Removed, no general replacement at this time. For Elasticsearch, use `db.elasticsearch.node.name` instead. """ DB_JDBC_DRIVER_CLASSNAME: Final = "db.jdbc.driver_classname" """ Deprecated: Removed, no replacement at this time. """ DB_MONGODB_COLLECTION: Final = "db.mongodb.collection" """ Deprecated: Replaced by `db.collection.name`. """ DB_MSSQL_INSTANCE_NAME: Final = "db.mssql.instance_name" """ Deprecated: Removed, no replacement at this time. """ DB_NAME: Final = "db.name" """ Deprecated: Replaced by `db.namespace`. """ DB_NAMESPACE: Final = "db.namespace" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DB_NAMESPACE`. """ DB_OPERATION: Final = "db.operation" """ Deprecated: Replaced by `db.operation.name`. """ DB_OPERATION_BATCH_SIZE: Final = "db.operation.batch.size" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DB_OPERATION_BATCH_SIZE`. """ DB_OPERATION_NAME: Final = "db.operation.name" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DB_OPERATION_NAME`. """ DB_OPERATION_PARAMETER_TEMPLATE: Final = "db.operation.parameter" """ A database operation parameter, with `` being the parameter name, and the attribute value being a string representation of the parameter value. Note: For example, a client-side maximum number of rows to read from the database MAY be recorded as the `db.operation.parameter.max_rows` attribute. `db.query.text` parameters SHOULD be captured using `db.query.parameter.` instead of `db.operation.parameter.`. """ DB_QUERY_PARAMETER_TEMPLATE: Final = "db.query.parameter" """ A database query parameter, with `` being the parameter name, and the attribute value being a string representation of the parameter value. Note: If a query parameter has no name and instead is referenced only by index, then `` SHOULD be the 0-based index. `db.query.parameter.` SHOULD match up with the parameterized placeholders present in `db.query.text`. It is RECOMMENDED to capture the value as provided by the application without attempting to do any case normalization. `db.query.parameter.` SHOULD NOT be captured on batch operations. Examples: - For a query `SELECT * FROM users where username = %s` with the parameter `"jdoe"`, the attribute `db.query.parameter.0` SHOULD be set to `"jdoe"`. - For a query `"SELECT * FROM users WHERE username = %(userName)s;` with parameter `userName = "jdoe"`, the attribute `db.query.parameter.userName` SHOULD be set to `"jdoe"`. """ DB_QUERY_SUMMARY: Final = "db.query.summary" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DB_QUERY_SUMMARY`. """ DB_QUERY_TEXT: Final = "db.query.text" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DB_QUERY_TEXT`. """ DB_REDIS_DATABASE_INDEX: Final = "db.redis.database_index" """ Deprecated: Uncategorized. """ DB_RESPONSE_RETURNED_ROWS: Final = "db.response.returned_rows" """ Number of rows returned by the operation. """ DB_RESPONSE_STATUS_CODE: Final = "db.response.status_code" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DB_RESPONSE_STATUS_CODE`. 
""" DB_SQL_TABLE: Final = "db.sql.table" """ Deprecated: Replaced by `db.collection.name`, but only if not extracting the value from `db.query.text`. """ DB_STATEMENT: Final = "db.statement" """ Deprecated: Replaced by `db.query.text`. """ DB_STORED_PROCEDURE_NAME: Final = "db.stored_procedure.name" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DB_STORED_PROCEDURE_NAME`. """ DB_SYSTEM: Final = "db.system" """ Deprecated: Replaced by `db.system.name`. """ DB_SYSTEM_NAME: Final = "db.system.name" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DB_SYSTEM_NAME`. """ DB_USER: Final = "db.user" """ Deprecated: Removed, no replacement at this time. """ @deprecated( "The attribute db.cassandra.consistency_level is deprecated - Replaced by `cassandra.consistency.level`" ) class DbCassandraConsistencyLevelValues(Enum): ALL = "all" """all.""" EACH_QUORUM = "each_quorum" """each_quorum.""" QUORUM = "quorum" """quorum.""" LOCAL_QUORUM = "local_quorum" """local_quorum.""" ONE = "one" """one.""" TWO = "two" """two.""" THREE = "three" """three.""" LOCAL_ONE = "local_one" """local_one.""" ANY = "any" """any.""" SERIAL = "serial" """serial.""" LOCAL_SERIAL = "local_serial" """local_serial.""" class DbClientConnectionStateValues(Enum): IDLE = "idle" """idle.""" USED = "used" """used.""" @deprecated( "The attribute db.client.connections.state is deprecated - Replaced by `db.client.connection.state`" ) class DbClientConnectionsStateValues(Enum): IDLE = "idle" """idle.""" USED = "used" """used.""" @deprecated( "The attribute db.cosmosdb.connection_mode is deprecated - Replaced by `azure.cosmosdb.connection.mode`" ) class DbCosmosdbConnectionModeValues(Enum): GATEWAY = "gateway" """Gateway (HTTP) connection.""" DIRECT = "direct" """Direct connection.""" @deprecated( "The attribute db.cosmosdb.consistency_level is deprecated - Replaced by `azure.cosmosdb.consistency.level`" ) class DbCosmosdbConsistencyLevelValues(Enum): STRONG = "Strong" """strong.""" BOUNDED_STALENESS = "BoundedStaleness" """bounded_staleness.""" SESSION = "Session" """session.""" EVENTUAL = "Eventual" """eventual.""" CONSISTENT_PREFIX = "ConsistentPrefix" """consistent_prefix.""" @deprecated( "The attribute db.cosmosdb.operation_type is deprecated - Removed, no replacement at this time" ) class DbCosmosdbOperationTypeValues(Enum): BATCH = "batch" """batch.""" CREATE = "create" """create.""" DELETE = "delete" """delete.""" EXECUTE = "execute" """execute.""" EXECUTE_JAVASCRIPT = "execute_javascript" """execute_javascript.""" INVALID = "invalid" """invalid.""" HEAD = "head" """head.""" HEAD_FEED = "head_feed" """head_feed.""" PATCH = "patch" """patch.""" QUERY = "query" """query.""" QUERY_PLAN = "query_plan" """query_plan.""" READ = "read" """read.""" READ_FEED = "read_feed" """read_feed.""" REPLACE = "replace" """replace.""" UPSERT = "upsert" """upsert.""" @deprecated( "The attribute db.system is deprecated - Replaced by `db.system.name`" ) class DbSystemValues(Enum): OTHER_SQL = "other_sql" """Some other SQL database. Fallback only. 
See notes.""" ADABAS = "adabas" """Adabas (Adaptable Database System).""" CACHE = "cache" """Deprecated: Replaced by `intersystems_cache`.""" INTERSYSTEMS_CACHE = "intersystems_cache" """InterSystems Caché.""" CASSANDRA = "cassandra" """Apache Cassandra.""" CLICKHOUSE = "clickhouse" """ClickHouse.""" CLOUDSCAPE = "cloudscape" """Deprecated: Replaced by `other_sql`.""" COCKROACHDB = "cockroachdb" """CockroachDB.""" COLDFUSION = "coldfusion" """Deprecated: Obsoleted.""" COSMOSDB = "cosmosdb" """Microsoft Azure Cosmos DB.""" COUCHBASE = "couchbase" """Couchbase.""" COUCHDB = "couchdb" """CouchDB.""" DB2 = "db2" """IBM Db2.""" DERBY = "derby" """Apache Derby.""" DYNAMODB = "dynamodb" """Amazon DynamoDB.""" EDB = "edb" """EnterpriseDB.""" ELASTICSEARCH = "elasticsearch" """Elasticsearch.""" FILEMAKER = "filemaker" """FileMaker.""" FIREBIRD = "firebird" """Firebird.""" FIRSTSQL = "firstsql" """Deprecated: Replaced by `other_sql`.""" GEODE = "geode" """Apache Geode.""" H2 = "h2" """H2.""" HANADB = "hanadb" """SAP HANA.""" HBASE = "hbase" """Apache HBase.""" HIVE = "hive" """Apache Hive.""" HSQLDB = "hsqldb" """HyperSQL DataBase.""" INFLUXDB = "influxdb" """InfluxDB.""" INFORMIX = "informix" """Informix.""" INGRES = "ingres" """Ingres.""" INSTANTDB = "instantdb" """InstantDB.""" INTERBASE = "interbase" """InterBase.""" MARIADB = "mariadb" """MariaDB.""" MAXDB = "maxdb" """SAP MaxDB.""" MEMCACHED = "memcached" """Memcached.""" MONGODB = "mongodb" """MongoDB.""" MSSQL = "mssql" """Microsoft SQL Server.""" MSSQLCOMPACT = "mssqlcompact" """Deprecated: Replaced by `other_sql`.""" MYSQL = "mysql" """MySQL.""" NEO4J = "neo4j" """Neo4j.""" NETEZZA = "netezza" """Netezza.""" OPENSEARCH = "opensearch" """OpenSearch.""" ORACLE = "oracle" """Oracle Database.""" PERVASIVE = "pervasive" """Pervasive PSQL.""" POINTBASE = "pointbase" """PointBase.""" POSTGRESQL = "postgresql" """PostgreSQL.""" PROGRESS = "progress" """Progress Database.""" REDIS = "redis" """Redis.""" REDSHIFT = "redshift" """Amazon Redshift.""" SPANNER = "spanner" """Cloud Spanner.""" SQLITE = "sqlite" """SQLite.""" SYBASE = "sybase" """Sybase.""" TERADATA = "teradata" """Teradata.""" TRINO = "trino" """Trino.""" VERTICA = "vertica" """Vertica.""" @deprecated( "Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DbSystemNameValues`." ) class DbSystemNameValues(Enum): OTHER_SQL = "other_sql" """Some other SQL database. 
Fallback only.""" SOFTWAREAG_ADABAS = "softwareag.adabas" """[Adabas (Adaptable Database System)](https://documentation.softwareag.com/?pf=adabas).""" ACTIAN_INGRES = "actian.ingres" """[Actian Ingres](https://www.actian.com/databases/ingres/).""" AWS_DYNAMODB = "aws.dynamodb" """[Amazon DynamoDB](https://aws.amazon.com/pm/dynamodb/).""" AWS_REDSHIFT = "aws.redshift" """[Amazon Redshift](https://aws.amazon.com/redshift/).""" AZURE_COSMOSDB = "azure.cosmosdb" """[Azure Cosmos DB](https://learn.microsoft.com/azure/cosmos-db).""" INTERSYSTEMS_CACHE = "intersystems.cache" """[InterSystems Caché](https://www.intersystems.com/products/cache/).""" CASSANDRA = "cassandra" """[Apache Cassandra](https://cassandra.apache.org/).""" CLICKHOUSE = "clickhouse" """[ClickHouse](https://clickhouse.com/).""" COCKROACHDB = "cockroachdb" """[CockroachDB](https://www.cockroachlabs.com/).""" COUCHBASE = "couchbase" """[Couchbase](https://www.couchbase.com/).""" COUCHDB = "couchdb" """[Apache CouchDB](https://couchdb.apache.org/).""" DERBY = "derby" """[Apache Derby](https://db.apache.org/derby/).""" ELASTICSEARCH = "elasticsearch" """[Elasticsearch](https://www.elastic.co/elasticsearch).""" FIREBIRDSQL = "firebirdsql" """[Firebird](https://www.firebirdsql.org/).""" GCP_SPANNER = "gcp.spanner" """[Google Cloud Spanner](https://cloud.google.com/spanner).""" GEODE = "geode" """[Apache Geode](https://geode.apache.org/).""" H2DATABASE = "h2database" """[H2 Database](https://h2database.com/).""" HBASE = "hbase" """[Apache HBase](https://hbase.apache.org/).""" HIVE = "hive" """[Apache Hive](https://hive.apache.org/).""" HSQLDB = "hsqldb" """[HyperSQL Database](https://hsqldb.org/).""" IBM_DB2 = "ibm.db2" """[IBM Db2](https://www.ibm.com/db2).""" IBM_INFORMIX = "ibm.informix" """[IBM Informix](https://www.ibm.com/products/informix).""" IBM_NETEZZA = "ibm.netezza" """[IBM Netezza](https://www.ibm.com/products/netezza).""" INFLUXDB = "influxdb" """[InfluxDB](https://www.influxdata.com/).""" INSTANTDB = "instantdb" """[Instant](https://www.instantdb.com/).""" MARIADB = "mariadb" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DbSystemNameValues.MARIADB`.""" MEMCACHED = "memcached" """[Memcached](https://memcached.org/).""" MONGODB = "mongodb" """[MongoDB](https://www.mongodb.com/).""" MICROSOFT_SQL_SERVER = "microsoft.sql_server" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DbSystemNameValues.MICROSOFT_SQL_SERVER`.""" MYSQL = "mysql" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DbSystemNameValues.MYSQL`.""" NEO4J = "neo4j" """[Neo4j](https://neo4j.com/).""" OPENSEARCH = "opensearch" """[OpenSearch](https://opensearch.org/).""" ORACLE_DB = "oracle.db" """[Oracle Database](https://www.oracle.com/database/).""" POSTGRESQL = "postgresql" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.db_attributes.DbSystemNameValues.POSTGRESQL`.""" REDIS = "redis" """[Redis](https://redis.io/).""" SAP_HANA = "sap.hana" """[SAP HANA](https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html).""" SAP_MAXDB = "sap.maxdb" """[SAP MaxDB](https://maxdb.sap.com/).""" SQLITE = "sqlite" """[SQLite](https://www.sqlite.org/).""" TERADATA = "teradata" """[Teradata](https://www.teradata.com/).""" TRINO = "trino" """[Trino](https://trino.io/).""" 
deployment_attributes.py000066400000000000000000000033311511654350100436070ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final DEPLOYMENT_ENVIRONMENT: Final = "deployment.environment" """ Deprecated: Replaced by `deployment.environment.name`. """ DEPLOYMENT_ENVIRONMENT_NAME: Final = "deployment.environment.name" """ Name of the [deployment environment](https://wikipedia.org/wiki/Deployment_environment) (aka deployment tier). Note: `deployment.environment.name` does not affect the uniqueness constraints defined through the `service.namespace`, `service.name` and `service.instance.id` resource attributes. This implies that resources carrying the following attribute combinations MUST be considered to be identifying the same service: - `service.name=frontend`, `deployment.environment.name=production` - `service.name=frontend`, `deployment.environment.name=staging`. """ DEPLOYMENT_ID: Final = "deployment.id" """ The id of the deployment. """ DEPLOYMENT_NAME: Final = "deployment.name" """ The name of the deployment. """ DEPLOYMENT_STATUS: Final = "deployment.status" """ The status of the deployment. """ class DeploymentStatusValues(Enum): FAILED = "failed" """failed.""" SUCCEEDED = "succeeded" """succeeded.""" destination_attributes.py000066400000000000000000000021061511654350100437470ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final DESTINATION_ADDRESS: Final = "destination.address" """ Destination address - domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. Note: When observed from the source side, and when communicating through an intermediary, `destination.address` SHOULD represent the destination address behind any intermediaries, for example proxies, if it's available. """ DESTINATION_PORT: Final = "destination.port" """ Destination port number. 
""" device_attributes.py000066400000000000000000000052201511654350100426650ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final DEVICE_ID: Final = "device.id" """ A unique identifier representing the device. Note: Its value SHOULD be identical for all apps on a device and it SHOULD NOT change if an app is uninstalled and re-installed. However, it might be resettable by the user for all apps on a device. Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be used as values. More information about Android identifier best practices can be found in the [Android user data IDs guide](https://developer.android.com/training/articles/user-data-ids). > [!WARNING] > > This attribute may contain sensitive (PII) information. Caution should be taken when storing personal data or anything which can identify a user. GDPR and data protection laws may apply, > ensure you do your own due diligence. > > Due to these reasons, this identifier is not recommended for consumer applications and will likely result in rejection from both Google Play and App Store. > However, it may be appropriate for specific enterprise scenarios, such as kiosk devices or enterprise-managed devices, with appropriate compliance clearance. > Any instrumentation providing this identifier MUST implement it as an opt-in feature. > > See [`app.installation.id`](/docs/registry/attributes/app.md#app-installation-id) for a more privacy-preserving alternative. """ DEVICE_MANUFACTURER: Final = "device.manufacturer" """ The name of the device manufacturer. Note: The Android OS provides this field via [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). iOS apps SHOULD hardcode the value `Apple`. """ DEVICE_MODEL_IDENTIFIER: Final = "device.model.identifier" """ The model identifier for the device. Note: It's recommended this value represents a machine-readable version of the model identifier rather than the market or consumer-friendly name of the device. """ DEVICE_MODEL_NAME: Final = "device.model.name" """ The marketing name for the device model. Note: It's recommended this value represents a human-readable version of the device model rather than a machine-readable alternative. """ disk_attributes.py000066400000000000000000000014751511654350100423700ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final DISK_IO_DIRECTION: Final = "disk.io.direction" """ The disk IO operation direction. """ class DiskIoDirectionValues(Enum): READ = "read" """read.""" WRITE = "write" """write.""" dns_attributes.py000066400000000000000000000016241511654350100422160ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final DNS_ANSWERS: Final = "dns.answers" """ The list of IPv4 or IPv6 addresses resolved during DNS lookup. """ DNS_QUESTION_NAME: Final = "dns.question.name" """ The name being queried. Note: The name represents the queried domain name as it appears in the DNS query without any additional normalization. """ elasticsearch_attributes.py000066400000000000000000000014031511654350100442370ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final ELASTICSEARCH_NODE_NAME: Final = "elasticsearch.node.name" """ Represents the human-readable identifier of the node/instance to which a request was routed. """ enduser_attributes.py000066400000000000000000000025621511654350100431010ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
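# --- Illustrative usage sketch ---
# A minimal sketch, assuming the OpenTelemetry API package is installed,
# of recording the pseudonymous identifier defined below (ENDUSER_PSEUDO_ID)
# instead of the raw, PII-carrying ENDUSER_ID. The random per-session value
# is one assumed scheme; whether it satisfies your privacy requirements is
# an application-level decision.
def _example_record_pseudonymous_user() -> None:
    import uuid

    from opentelemetry import trace

    # A random per-session value avoids putting a linkable user identity
    # on the span, in line with the guidance on ENDUSER_PSEUDO_ID below.
    pseudo_id = str(uuid.uuid4())
    trace.get_current_span().set_attribute(ENDUSER_PSEUDO_ID, pseudo_id)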
from typing import Final ENDUSER_ID: Final = "enduser.id" """ Unique identifier of an end user in the system. It may be a username, email address, or other identifier. Note: Unique identifier of an end user in the system. > [!Warning] > This field contains sensitive (PII) information. """ ENDUSER_PSEUDO_ID: Final = "enduser.pseudo.id" """ Pseudonymous identifier of an end user. This identifier should be a random value that is not directly linked or associated with the end user's actual identity. Note: Pseudonymous identifier of an end user. > [!Warning] > This field contains sensitive (linkable PII) information. """ ENDUSER_ROLE: Final = "enduser.role" """ Deprecated: Use `user.roles` instead. """ ENDUSER_SCOPE: Final = "enduser.scope" """ Deprecated: Removed, no replacement at this time. """ error_attributes.py000066400000000000000000000031271511654350100425630ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated ERROR_MESSAGE: Final = "error.message" """ A message providing more detail about an error in human-readable form. Note: `error.message` should provide additional context and detail about an error. It is NOT RECOMMENDED to duplicate the value of `error.type` in `error.message`. It is also NOT RECOMMENDED to duplicate the value of `exception.message` in `error.message`. `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded cardinality and overlap with span status. """ ERROR_TYPE: Final = "error.type" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.error_attributes.ERROR_TYPE`. """ @deprecated( "Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.error_attributes.ErrorTypeValues`." ) class ErrorTypeValues(Enum): OTHER = "_OTHER" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.error_attributes.ErrorTypeValues.OTHER`.""" event_attributes.py000066400000000000000000000014551511654350100425450ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
from typing import Final EVENT_NAME: Final = "event.name" """ Deprecated: The value of this attribute MUST now be set as the value of the EventName field on the LogRecord to indicate that the LogRecord represents an Event. """ exception_attributes.py000066400000000000000000000024171511654350100434310ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final EXCEPTION_ESCAPED: Final = "exception.escaped" """ Deprecated: It's no longer recommended to record exceptions that are handled and do not escape the scope of a span. """ EXCEPTION_MESSAGE: Final = "exception.message" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.exception_attributes.EXCEPTION_MESSAGE`. """ EXCEPTION_STACKTRACE: Final = "exception.stacktrace" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.exception_attributes.EXCEPTION_STACKTRACE`. """ EXCEPTION_TYPE: Final = "exception.type" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.exception_attributes.EXCEPTION_TYPE`. """ faas_attributes.py000066400000000000000000000140411511654350100423410ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final FAAS_COLDSTART: Final = "faas.coldstart" """ A boolean that is true if the serverless function is executed for the first time (aka cold-start). """ FAAS_CRON: Final = "faas.cron" """ A string containing the schedule period as [Cron Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). """ FAAS_DOCUMENT_COLLECTION: Final = "faas.document.collection" """ The name of the source on which the triggering operation was performed. For example, in Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to the database name. """ FAAS_DOCUMENT_NAME: Final = "faas.document.name" """ The document name/table subjected to the operation. For example, in Cloud Storage or S3 this is the name of the file, and in Cosmos DB the table name. """ FAAS_DOCUMENT_OPERATION: Final = "faas.document.operation" """ Describes the type of the operation that was performed on the data.
""" FAAS_DOCUMENT_TIME: Final = "faas.document.time" """ A string containing the time when the data was accessed in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). """ FAAS_INSTANCE: Final = "faas.instance" """ The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version. Note: - **AWS Lambda:** Use the (full) log stream name. """ FAAS_INVOCATION_ID: Final = "faas.invocation_id" """ The invocation ID of the current function invocation. """ FAAS_INVOKED_NAME: Final = "faas.invoked_name" """ The name of the invoked function. Note: SHOULD be equal to the `faas.name` resource attribute of the invoked function. """ FAAS_INVOKED_PROVIDER: Final = "faas.invoked_provider" """ The cloud provider of the invoked function. Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked function. """ FAAS_INVOKED_REGION: Final = "faas.invoked_region" """ The cloud region of the invoked function. Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked function. """ FAAS_MAX_MEMORY: Final = "faas.max_memory" """ The amount of memory available to the serverless function converted to Bytes. Note: It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must be multiplied by 1,048,576). """ FAAS_NAME: Final = "faas.name" """ The name of the single function that this runtime instance executes. Note: This is the name of the function as configured/deployed on the FaaS platform and is usually different from the name of the callback function (which may be stored in the [`code.namespace`/`code.function.name`](/docs/general/attributes.md#source-code-attributes) span attributes). For some cloud providers, the above definition is ambiguous. The following definition of function name MUST be used for this attribute (and consequently the span name) for the listed cloud providers/products: - **Azure:** The full name `/`, i.e., function app name followed by a forward slash followed by the function name (this form can also be seen in the resource JSON for the function). This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share a TracerProvider (see also the `cloud.resource_id` attribute). """ FAAS_TIME: Final = "faas.time" """ A string containing the function invocation time in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). """ FAAS_TRIGGER: Final = "faas.trigger" """ Type of the trigger which caused this function invocation. """ FAAS_VERSION: Final = "faas.version" """ The immutable version of the function being executed. Note: Depending on the cloud provider and platform, use: - **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) (an integer represented as a decimal string). - **Google Cloud Run (Services):** The [revision](https://cloud.google.com/run/docs/managing/revisions) (i.e., the function name plus the revision suffix). - **Google Cloud Functions:** The value of the [`K_REVISION` environment variable](https://cloud.google.com/run/docs/container-contract#services-env-vars). - **Azure Functions:** Not applicable. 
Do not set this attribute. """ class FaasDocumentOperationValues(Enum): INSERT = "insert" """When a new object is created.""" EDIT = "edit" """When an object is modified.""" DELETE = "delete" """When an object is deleted.""" class FaasInvokedProviderValues(Enum): ALIBABA_CLOUD = "alibaba_cloud" """Alibaba Cloud.""" AWS = "aws" """Amazon Web Services.""" AZURE = "azure" """Microsoft Azure.""" GCP = "gcp" """Google Cloud Platform.""" TENCENT_CLOUD = "tencent_cloud" """Tencent Cloud.""" class FaasTriggerValues(Enum): DATASOURCE = "datasource" """A response to some data source operation such as a database or filesystem read/write.""" HTTP = "http" """To provide an answer to an inbound HTTP request.""" PUBSUB = "pubsub" """A function is set to be executed when messages are sent to a messaging system.""" TIMER = "timer" """A function is scheduled to be executed regularly.""" OTHER = "other" """If none of the others apply.""" feature_flag_attributes.py000066400000000000000000000117461511654350100440640ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated FEATURE_FLAG_CONTEXT_ID: Final = "feature_flag.context.id" """ The unique identifier for the flag evaluation context. For example, the targeting key. """ FEATURE_FLAG_EVALUATION_ERROR_MESSAGE: Final = ( "feature_flag.evaluation.error.message" ) """ Deprecated: Replaced by `error.message`. """ FEATURE_FLAG_EVALUATION_REASON: Final = "feature_flag.evaluation.reason" """ Deprecated: Replaced by `feature_flag.result.reason`. """ FEATURE_FLAG_KEY: Final = "feature_flag.key" """ The lookup key of the feature flag. """ FEATURE_FLAG_PROVIDER_NAME: Final = "feature_flag.provider.name" """ Identifies the feature flag provider. """ FEATURE_FLAG_RESULT_REASON: Final = "feature_flag.result.reason" """ The reason code which shows how a feature flag value was determined. """ FEATURE_FLAG_RESULT_VALUE: Final = "feature_flag.result.value" """ The evaluated value of the feature flag. Note: With some feature flag providers, feature flag results can be quite large or contain private or sensitive details. Because of this, `feature_flag.result.variant` is often the preferred attribute if it is available. It may be desirable to redact or otherwise limit the size and scope of `feature_flag.result.value` if possible. Because the evaluated flag value is unstructured and may be any type, it is left to the instrumentation author to determine how best to achieve this. """ FEATURE_FLAG_RESULT_VARIANT: Final = "feature_flag.result.variant" """ A semantic identifier for an evaluated flag value. Note: A semantic identifier, commonly referred to as a variant, provides a means for referring to a value without including the value itself. This can provide additional context for understanding the meaning behind a value. 
For example, the variant `red` may be used for the value `#c05543`. """ FEATURE_FLAG_SET_ID: Final = "feature_flag.set.id" """ The identifier of the [flag set](https://openfeature.dev/specification/glossary/#flag-set) to which the feature flag belongs. """ FEATURE_FLAG_VARIANT: Final = "feature_flag.variant" """ Deprecated: Replaced by `feature_flag.result.variant`. """ FEATURE_FLAG_VERSION: Final = "feature_flag.version" """ The version of the ruleset used during the evaluation. This may be any stable value which uniquely identifies the ruleset. """ @deprecated( "The attribute feature_flag.evaluation.reason is deprecated - Replaced by `feature_flag.result.reason`" ) class FeatureFlagEvaluationReasonValues(Enum): STATIC = "static" """The resolved value is static (no dynamic evaluation).""" DEFAULT = "default" """The resolved value fell back to a pre-configured value (no dynamic evaluation occurred or dynamic evaluation yielded no result).""" TARGETING_MATCH = "targeting_match" """The resolved value was the result of a dynamic evaluation, such as a rule or specific user-targeting.""" SPLIT = "split" """The resolved value was the result of pseudorandom assignment.""" CACHED = "cached" """The resolved value was retrieved from cache.""" DISABLED = "disabled" """The resolved value was the result of the flag being disabled in the management system.""" UNKNOWN = "unknown" """The reason for the resolved value could not be determined.""" STALE = "stale" """The resolved value is non-authoritative or possibly out of date.""" ERROR = "error" """The resolved value was the result of an error.""" class FeatureFlagResultReasonValues(Enum): STATIC = "static" """The resolved value is static (no dynamic evaluation).""" DEFAULT = "default" """The resolved value fell back to a pre-configured value (no dynamic evaluation occurred or dynamic evaluation yielded no result).""" TARGETING_MATCH = "targeting_match" """The resolved value was the result of a dynamic evaluation, such as a rule or specific user-targeting.""" SPLIT = "split" """The resolved value was the result of pseudorandom assignment.""" CACHED = "cached" """The resolved value was retrieved from cache.""" DISABLED = "disabled" """The resolved value was the result of the flag being disabled in the management system.""" UNKNOWN = "unknown" """The reason for the resolved value could not be determined.""" STALE = "stale" """The resolved value is non-authoritative or possibly out of date.""" ERROR = "error" """The resolved value was the result of an error.""" file_attributes.py000066400000000000000000000077141511654350100423550ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final FILE_ACCESSED: Final = "file.accessed" """ Time when the file was last accessed, in ISO 8601 format. Note: This attribute might not be supported by some file systems — NFS, FAT32, in embedded OS, etc.
""" FILE_ATTRIBUTES: Final = "file.attributes" """ Array of file attributes. Note: Attributes names depend on the OS or file system. Here’s a non-exhaustive list of values expected for this attribute: `archive`, `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, `write`. """ FILE_CHANGED: Final = "file.changed" """ Time when the file attributes or metadata was last changed, in ISO 8601 format. Note: `file.changed` captures the time when any of the file's properties or attributes (including the content) are changed, while `file.modified` captures the timestamp when the file content is modified. """ FILE_CREATED: Final = "file.created" """ Time when the file was created, in ISO 8601 format. Note: This attribute might not be supported by some file systems — NFS, FAT32, in embedded OS, etc. """ FILE_DIRECTORY: Final = "file.directory" """ Directory where the file is located. It should include the drive letter, when appropriate. """ FILE_EXTENSION: Final = "file.extension" """ File extension, excluding the leading dot. Note: When the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). """ FILE_FORK_NAME: Final = "file.fork_name" """ Name of the fork. A fork is additional data associated with a filesystem object. Note: On Linux, a resource fork is used to store additional data with a filesystem object. A file always has at least one fork for the data portion, and additional forks may exist. On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default data stream for a file is just called $DATA. Zone.Identifier is commonly used by Windows to track contents downloaded from the Internet. An ADS is typically of the form: C:\\path\\to\\filename.extension:some_fork_name, and some_fork_name is the value that should populate `fork_name`. `filename.extension` should populate `file.name`, and `extension` should populate `file.extension`. The full path, `file.path`, will include the fork name. """ FILE_GROUP_ID: Final = "file.group.id" """ Primary Group ID (GID) of the file. """ FILE_GROUP_NAME: Final = "file.group.name" """ Primary group name of the file. """ FILE_INODE: Final = "file.inode" """ Inode representing the file in the filesystem. """ FILE_MODE: Final = "file.mode" """ Mode of the file in octal representation. """ FILE_MODIFIED: Final = "file.modified" """ Time when the file content was last modified, in ISO 8601 format. """ FILE_NAME: Final = "file.name" """ Name of the file including the extension, without the directory. """ FILE_OWNER_ID: Final = "file.owner.id" """ The user ID (UID) or security identifier (SID) of the file owner. """ FILE_OWNER_NAME: Final = "file.owner.name" """ Username of the file owner. """ FILE_PATH: Final = "file.path" """ Full path to the file, including the file name. It should include the drive letter, when appropriate. """ FILE_SIZE: Final = "file.size" """ File size in bytes. """ FILE_SYMBOLIC_LINK_TARGET_PATH: Final = "file.symbolic_link.target_path" """ Path to the target of a symbolic link. Note: This attribute is only applicable to symbolic links. 
""" gcp_attributes.py000066400000000000000000000201621511654350100422010ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final GCP_APPHUB_APPLICATION_CONTAINER: Final = "gcp.apphub.application.container" """ The container within GCP where the AppHub application is defined. """ GCP_APPHUB_APPLICATION_ID: Final = "gcp.apphub.application.id" """ The name of the application as configured in AppHub. """ GCP_APPHUB_APPLICATION_LOCATION: Final = "gcp.apphub.application.location" """ The GCP zone or region where the application is defined. """ GCP_APPHUB_SERVICE_CRITICALITY_TYPE: Final = ( "gcp.apphub.service.criticality_type" ) """ Criticality of a service indicates its importance to the business. Note: [See AppHub type enum](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type). """ GCP_APPHUB_SERVICE_ENVIRONMENT_TYPE: Final = ( "gcp.apphub.service.environment_type" ) """ Environment of a service is the stage of a software lifecycle. Note: [See AppHub environment type](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1). """ GCP_APPHUB_SERVICE_ID: Final = "gcp.apphub.service.id" """ The name of the service as configured in AppHub. """ GCP_APPHUB_WORKLOAD_CRITICALITY_TYPE: Final = ( "gcp.apphub.workload.criticality_type" ) """ Criticality of a workload indicates its importance to the business. Note: [See AppHub type enum](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type). """ GCP_APPHUB_WORKLOAD_ENVIRONMENT_TYPE: Final = ( "gcp.apphub.workload.environment_type" ) """ Environment of a workload is the stage of a software lifecycle. Note: [See AppHub environment type](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1). """ GCP_APPHUB_WORKLOAD_ID: Final = "gcp.apphub.workload.id" """ The name of the workload as configured in AppHub. """ GCP_APPHUB_DESTINATION_APPLICATION_CONTAINER: Final = ( "gcp.apphub_destination.application.container" ) """ The container within GCP where the AppHub destination application is defined. """ GCP_APPHUB_DESTINATION_APPLICATION_ID: Final = ( "gcp.apphub_destination.application.id" ) """ The name of the destination application as configured in AppHub. """ GCP_APPHUB_DESTINATION_APPLICATION_LOCATION: Final = ( "gcp.apphub_destination.application.location" ) """ The GCP zone or region where the destination application is defined. """ GCP_APPHUB_DESTINATION_SERVICE_CRITICALITY_TYPE: Final = ( "gcp.apphub_destination.service.criticality_type" ) """ Criticality of a destination workload indicates its importance to the business as specified in [AppHub type enum](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type). 
""" GCP_APPHUB_DESTINATION_SERVICE_ENVIRONMENT_TYPE: Final = ( "gcp.apphub_destination.service.environment_type" ) """ Software lifecycle stage of a destination service as defined [AppHub environment type](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1). """ GCP_APPHUB_DESTINATION_SERVICE_ID: Final = "gcp.apphub_destination.service.id" """ The name of the destination service as configured in AppHub. """ GCP_APPHUB_DESTINATION_WORKLOAD_CRITICALITY_TYPE: Final = ( "gcp.apphub_destination.workload.criticality_type" ) """ Criticality of a destination workload indicates its importance to the business as specified in [AppHub type enum](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type). """ GCP_APPHUB_DESTINATION_WORKLOAD_ENVIRONMENT_TYPE: Final = ( "gcp.apphub_destination.workload.environment_type" ) """ Environment of a destination workload is the stage of a software lifecycle as provided in the [AppHub environment type](https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1). """ GCP_APPHUB_DESTINATION_WORKLOAD_ID: Final = ( "gcp.apphub_destination.workload.id" ) """ The name of the destination workload as configured in AppHub. """ GCP_CLIENT_SERVICE: Final = "gcp.client.service" """ Identifies the Google Cloud service for which the official client library is intended. Note: Intended to be a stable identifier for Google Cloud client libraries that is uniform across implementation languages. The value should be derived from the canonical service domain for the service; for example, 'foo.googleapis.com' should result in a value of 'foo'. """ GCP_CLOUD_RUN_JOB_EXECUTION: Final = "gcp.cloud_run.job.execution" """ The name of the Cloud Run [execution](https://cloud.google.com/run/docs/managing/job-executions) being run for the Job, as set by the [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) environment variable. """ GCP_CLOUD_RUN_JOB_TASK_INDEX: Final = "gcp.cloud_run.job.task_index" """ The index for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) environment variable. """ GCP_GCE_INSTANCE_HOSTNAME: Final = "gcp.gce.instance.hostname" """ The hostname of a GCE instance. This is the full value of the default or [custom hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). """ GCP_GCE_INSTANCE_NAME: Final = "gcp.gce.instance.name" """ The instance name of a GCE instance. This is the value provided by `host.name`, the visible name of the instance in the Cloud Console UI, and the prefix for the default hostname of the instance as defined by the [default internal DNS name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). 
""" class GcpApphubServiceCriticalityTypeValues(Enum): MISSION_CRITICAL = "MISSION_CRITICAL" """Mission critical service.""" HIGH = "HIGH" """High impact.""" MEDIUM = "MEDIUM" """Medium impact.""" LOW = "LOW" """Low impact.""" class GcpApphubServiceEnvironmentTypeValues(Enum): PRODUCTION = "PRODUCTION" """Production environment.""" STAGING = "STAGING" """Staging environment.""" TEST = "TEST" """Test environment.""" DEVELOPMENT = "DEVELOPMENT" """Development environment.""" class GcpApphubWorkloadCriticalityTypeValues(Enum): MISSION_CRITICAL = "MISSION_CRITICAL" """Mission critical service.""" HIGH = "HIGH" """High impact.""" MEDIUM = "MEDIUM" """Medium impact.""" LOW = "LOW" """Low impact.""" class GcpApphubWorkloadEnvironmentTypeValues(Enum): PRODUCTION = "PRODUCTION" """Production environment.""" STAGING = "STAGING" """Staging environment.""" TEST = "TEST" """Test environment.""" DEVELOPMENT = "DEVELOPMENT" """Development environment.""" class GcpApphubDestinationServiceCriticalityTypeValues(Enum): MISSION_CRITICAL = "MISSION_CRITICAL" """Mission critical service.""" HIGH = "HIGH" """High impact.""" MEDIUM = "MEDIUM" """Medium impact.""" LOW = "LOW" """Low impact.""" class GcpApphubDestinationServiceEnvironmentTypeValues(Enum): PRODUCTION = "PRODUCTION" """Production environment.""" STAGING = "STAGING" """Staging environment.""" TEST = "TEST" """Test environment.""" DEVELOPMENT = "DEVELOPMENT" """Development environment.""" class GcpApphubDestinationWorkloadCriticalityTypeValues(Enum): MISSION_CRITICAL = "MISSION_CRITICAL" """Mission critical service.""" HIGH = "HIGH" """High impact.""" MEDIUM = "MEDIUM" """Medium impact.""" LOW = "LOW" """Low impact.""" class GcpApphubDestinationWorkloadEnvironmentTypeValues(Enum): PRODUCTION = "PRODUCTION" """Production environment.""" STAGING = "STAGING" """Staging environment.""" TEST = "TEST" """Test environment.""" DEVELOPMENT = "DEVELOPMENT" """Development environment.""" gen_ai_attributes.py000066400000000000000000000452051511654350100426570ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated GEN_AI_AGENT_DESCRIPTION: Final = "gen_ai.agent.description" """ Free-form description of the GenAI agent provided by the application. """ GEN_AI_AGENT_ID: Final = "gen_ai.agent.id" """ The unique identifier of the GenAI agent. """ GEN_AI_AGENT_NAME: Final = "gen_ai.agent.name" """ Human-readable name of the GenAI agent provided by the application. """ GEN_AI_COMPLETION: Final = "gen_ai.completion" """ Deprecated: Removed, no replacement at this time. """ GEN_AI_CONVERSATION_ID: Final = "gen_ai.conversation.id" """ The unique identifier for a conversation (session, thread), used to store and correlate messages within this conversation. 
""" GEN_AI_DATA_SOURCE_ID: Final = "gen_ai.data_source.id" """ The data source identifier. Note: Data sources are used by AI agents and RAG applications to store grounding data. A data source may be an external database, object store, document collection, website, or any other storage system used by the GenAI agent or application. The `gen_ai.data_source.id` SHOULD match the identifier used by the GenAI system rather than a name specific to the external storage, such as a database or object store. Semantic conventions referencing `gen_ai.data_source.id` MAY also leverage additional attributes, such as `db.*`, to further identify and describe the data source. """ GEN_AI_EMBEDDINGS_DIMENSION_COUNT: Final = "gen_ai.embeddings.dimension.count" """ The number of dimensions the resulting output embeddings should have. """ GEN_AI_EVALUATION_EXPLANATION: Final = "gen_ai.evaluation.explanation" """ A free-form explanation for the assigned score provided by the evaluator. """ GEN_AI_EVALUATION_NAME: Final = "gen_ai.evaluation.name" """ The name of the evaluation metric used for the GenAI response. """ GEN_AI_EVALUATION_SCORE_LABEL: Final = "gen_ai.evaluation.score.label" """ Human readable label for evaluation. Note: This attribute provides a human-readable interpretation of the evaluation score produced by an evaluator. For example, a score value of 1 could mean "relevant" in one evaluation system and "not relevant" in another, depending on the scoring range and evaluator. The label SHOULD have low cardinality. Possible values depend on the evaluation metric and evaluator used; implementations SHOULD document the possible values. """ GEN_AI_EVALUATION_SCORE_VALUE: Final = "gen_ai.evaluation.score.value" """ The evaluation score returned by the evaluator. """ GEN_AI_INPUT_MESSAGES: Final = "gen_ai.input.messages" """ The chat history provided to the model as an input. Note: Instrumentations MUST follow [Input messages JSON schema](/docs/gen-ai/gen-ai-input-messages.json). When the attribute is recorded on events, it MUST be recorded in structured form. When recorded on spans, it MAY be recorded as a JSON string if structured format is not supported and SHOULD be recorded in structured form otherwise. Messages MUST be provided in the order they were sent to the model. Instrumentations MAY provide a way for users to filter or truncate input messages. > [!Warning] > This attribute is likely to contain sensitive information including user/PII data. See [Recording content on attributes](/docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes) section for more details. """ GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: Final = ( "gen_ai.openai.request.response_format" ) """ Deprecated: Replaced by `gen_ai.output.type`. """ GEN_AI_OPENAI_REQUEST_SEED: Final = "gen_ai.openai.request.seed" """ Deprecated: Replaced by `gen_ai.request.seed`. """ GEN_AI_OPENAI_REQUEST_SERVICE_TIER: Final = ( "gen_ai.openai.request.service_tier" ) """ Deprecated: Replaced by `openai.request.service_tier`. """ GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: Final = ( "gen_ai.openai.response.service_tier" ) """ Deprecated: Replaced by `openai.response.service_tier`. """ GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT: Final = ( "gen_ai.openai.response.system_fingerprint" ) """ Deprecated: Replaced by `openai.response.system_fingerprint`. """ GEN_AI_OPERATION_NAME: Final = "gen_ai.operation.name" """ The name of the operation being performed. 
Note: If one of the predefined values applies, but a specific system uses a different name, it's RECOMMENDED to document it in the semantic conventions for the specific GenAI system and use the system-specific name in the instrumentation. If a different name is not documented, instrumentation libraries SHOULD use the applicable predefined value. """ GEN_AI_OUTPUT_MESSAGES: Final = "gen_ai.output.messages" """ Messages returned by the model where each message represents a specific model response (choice, candidate). Note: Instrumentations MUST follow [Output messages JSON schema](/docs/gen-ai/gen-ai-output-messages.json). Each message represents a single output choice/candidate generated by the model. Each message corresponds to exactly one generation (choice/candidate) and vice versa - one choice cannot be split across multiple messages, nor can one message contain parts from multiple choices. When the attribute is recorded on events, it MUST be recorded in structured form. When recorded on spans, it MAY be recorded as a JSON string if structured format is not supported and SHOULD be recorded in structured form otherwise. Instrumentations MAY provide a way for users to filter or truncate output messages. > [!Warning] > This attribute is likely to contain sensitive information including user/PII data. See [Recording content on attributes](/docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes) section for more details. """ GEN_AI_OUTPUT_TYPE: Final = "gen_ai.output.type" """ Represents the content type requested by the client. Note: This attribute SHOULD be used when the client requests output of a specific type. The model may return zero or more outputs of this type. This attribute specifies the output modality and not the actual output format. For example, if an image is requested, the actual output could be a URL pointing to an image file. Additional output format details may be recorded in the future in the `gen_ai.output.{type}.*` attributes. """ GEN_AI_PROMPT: Final = "gen_ai.prompt" """ Deprecated: Removed, no replacement at this time. """ GEN_AI_PROVIDER_NAME: Final = "gen_ai.provider.name" """ The Generative AI provider as identified by the client or server instrumentation. Note: The attribute SHOULD be set based on the instrumentation's best knowledge and may differ from the actual model provider. Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms are accessible using the OpenAI REST API and corresponding client libraries, but may proxy or host models from different providers. The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address` attributes may help identify the actual system in use. The `gen_ai.provider.name` attribute acts as a discriminator that identifies the GenAI telemetry format flavor specific to that provider within GenAI semantic conventions. It SHOULD be set consistently with provider-specific attributes and signals. For example, GenAI spans, metrics, and events related to AWS Bedrock should have the `gen_ai.provider.name` set to `aws.bedrock` and include applicable `aws.bedrock.*` attributes and are not expected to include `openai.*` attributes. """ GEN_AI_REQUEST_CHOICE_COUNT: Final = "gen_ai.request.choice.count" """ The target number of candidate completions to return. """ GEN_AI_REQUEST_ENCODING_FORMATS: Final = "gen_ai.request.encoding_formats" """ The encoding formats requested in an embeddings operation, if specified. Note: In some GenAI systems the encoding formats are called embedding types.
Note: In some GenAI systems the encoding formats are called embedding types. Also, some GenAI systems only accept a single format per request. """ GEN_AI_REQUEST_FREQUENCY_PENALTY: Final = "gen_ai.request.frequency_penalty" """ The frequency penalty setting for the GenAI request. """ GEN_AI_REQUEST_MAX_TOKENS: Final = "gen_ai.request.max_tokens" """ The maximum number of tokens the model generates for a request. """ GEN_AI_REQUEST_MODEL: Final = "gen_ai.request.model" """ The name of the GenAI model a request is being made to. """ GEN_AI_REQUEST_PRESENCE_PENALTY: Final = "gen_ai.request.presence_penalty" """ The presence penalty setting for the GenAI request. """ GEN_AI_REQUEST_SEED: Final = "gen_ai.request.seed" """ Requests with the same seed value are more likely to return the same result. """ GEN_AI_REQUEST_STOP_SEQUENCES: Final = "gen_ai.request.stop_sequences" """ List of sequences that the model will use to stop generating further tokens. """ GEN_AI_REQUEST_TEMPERATURE: Final = "gen_ai.request.temperature" """ The temperature setting for the GenAI request. """ GEN_AI_REQUEST_TOP_K: Final = "gen_ai.request.top_k" """ The top_k sampling setting for the GenAI request. """ GEN_AI_REQUEST_TOP_P: Final = "gen_ai.request.top_p" """ The top_p sampling setting for the GenAI request. """ GEN_AI_RESPONSE_FINISH_REASONS: Final = "gen_ai.response.finish_reasons" """ Array of reasons the model stopped generating tokens, corresponding to each generation received. """ GEN_AI_RESPONSE_ID: Final = "gen_ai.response.id" """ The unique identifier for the completion. """ GEN_AI_RESPONSE_MODEL: Final = "gen_ai.response.model" """ The name of the model that generated the response. """ GEN_AI_SYSTEM: Final = "gen_ai.system" """ Deprecated: Replaced by `gen_ai.provider.name`. """ GEN_AI_SYSTEM_INSTRUCTIONS: Final = "gen_ai.system_instructions" """ The system message or instructions provided to the GenAI model separately from the chat history. Note: This attribute SHOULD be used when the corresponding provider or API allows system instructions or messages to be provided separately from the chat history. Instructions that are part of the chat history SHOULD be recorded in the `gen_ai.input.messages` attribute instead. Instrumentations MUST follow [System instructions JSON schema](/docs/gen-ai/gen-ai-system-instructions.json). When recorded on spans, it MAY be recorded as a JSON string if structured format is not supported and SHOULD be recorded in structured form otherwise. Instrumentations MAY provide a way for users to filter or truncate system instructions. > [!Warning] > This attribute may contain sensitive information. See the [Recording content on attributes](/docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes) section for more details. """ GEN_AI_TOKEN_TYPE: Final = "gen_ai.token.type" """ The type of token being counted. """ GEN_AI_TOOL_CALL_ARGUMENTS: Final = "gen_ai.tool.call.arguments" """ Parameters passed to the tool call. Note: > [!WARNING] > This attribute may contain sensitive information. It's expected to be an object - in case a serialized string is available to the instrumentation, the instrumentation SHOULD make a best-effort attempt to deserialize it to an object. When recorded on spans, it MAY be recorded as a JSON string if structured format is not supported and SHOULD be recorded in structured form otherwise. """ GEN_AI_TOOL_CALL_ID: Final = "gen_ai.tool.call.id" """ The tool call identifier. """ GEN_AI_TOOL_CALL_RESULT: Final = "gen_ai.tool.call.result" """ The result returned by the tool call (if any and if execution was successful).
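As an illustrative sketch only (the attribute constants are from this module; the span and the values shown are hypothetical), an `execute_tool` span might record:

    span.set_attribute(GEN_AI_TOOL_NAME, "get_weather")
    span.set_attribute(GEN_AI_TOOL_CALL_ID, "call_123")
    span.set_attribute(GEN_AI_TOOL_CALL_RESULT, '{"temperature_c": 21}')
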
Note: > [!WARNING] > This attribute may contain sensitive information. It's expected to be an object - in case a serialized string is available to the instrumentation, the instrumentation SHOULD make a best-effort attempt to deserialize it to an object. When recorded on spans, it MAY be recorded as a JSON string if structured format is not supported and SHOULD be recorded in structured form otherwise. """ GEN_AI_TOOL_DEFINITIONS: Final = "gen_ai.tool.definitions" """ The list of source system tool definitions available to the GenAI agent or model. Note: The value of this attribute matches the source system's tool definition format. It's expected to be an array of objects where each object represents a tool definition. In case a serialized string is available to the instrumentation, the instrumentation SHOULD make a best-effort attempt to deserialize it to an array. When recorded on spans, it MAY be recorded as a JSON string if structured format is not supported and SHOULD be recorded in structured form otherwise. Since this attribute could be large, it's NOT RECOMMENDED to populate it by default. Instrumentations MAY provide a way to enable populating this attribute. """ GEN_AI_TOOL_DESCRIPTION: Final = "gen_ai.tool.description" """ The tool description. """ GEN_AI_TOOL_NAME: Final = "gen_ai.tool.name" """ Name of the tool utilized by the agent. """ GEN_AI_TOOL_TYPE: Final = "gen_ai.tool.type" """ Type of the tool utilized by the agent. Note: Extension: A tool executed on the agent side to directly call external APIs, bridging the gap between the agent and real-world systems. Agent-side operations involve actions that are performed by the agent on the server or within the agent's controlled environment. Function: A tool executed on the client side, where the agent generates parameters for a predefined function, and the client executes the logic. Client-side operations are actions taken on the user's end or within the client application. Datastore: A tool used by the agent to access and query structured or unstructured external data for retrieval-augmented tasks or knowledge updates. """ GEN_AI_USAGE_COMPLETION_TOKENS: Final = "gen_ai.usage.completion_tokens" """ Deprecated: Replaced by `gen_ai.usage.output_tokens`. """ GEN_AI_USAGE_INPUT_TOKENS: Final = "gen_ai.usage.input_tokens" """ The number of tokens used in the GenAI input (prompt). """ GEN_AI_USAGE_OUTPUT_TOKENS: Final = "gen_ai.usage.output_tokens" """ The number of tokens used in the GenAI response (completion). """ GEN_AI_USAGE_PROMPT_TOKENS: Final = "gen_ai.usage.prompt_tokens" """ Deprecated: Replaced by `gen_ai.usage.input_tokens`.
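As an illustrative migration sketch (the span and the token counts are hypothetical), record the replacement attributes instead:

    span.set_attribute(GEN_AI_USAGE_INPUT_TOKENS, 120)
    span.set_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, 42)
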
""" @deprecated( "The attribute gen_ai.openai.request.response_format is deprecated - Replaced by `gen_ai.output.type`" ) class GenAiOpenaiRequestResponseFormatValues(Enum): TEXT = "text" """Text response format.""" JSON_OBJECT = "json_object" """JSON object response format.""" JSON_SCHEMA = "json_schema" """JSON schema response format.""" @deprecated( "The attribute gen_ai.openai.request.service_tier is deprecated - Replaced by `openai.request.service_tier`" ) class GenAiOpenaiRequestServiceTierValues(Enum): AUTO = "auto" """The system will utilize scale tier credits until they are exhausted.""" DEFAULT = "default" """The system will utilize the default scale tier.""" class GenAiOperationNameValues(Enum): CHAT = "chat" """Chat completion operation such as [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat).""" GENERATE_CONTENT = "generate_content" """Multimodal content generation operation such as [Gemini Generate Content](https://ai.google.dev/api/generate-content).""" TEXT_COMPLETION = "text_completion" """Text completions operation such as [OpenAI Completions API (Legacy)](https://platform.openai.com/docs/api-reference/completions).""" EMBEDDINGS = "embeddings" """Embeddings operation such as [OpenAI Create embeddings API](https://platform.openai.com/docs/api-reference/embeddings/create).""" CREATE_AGENT = "create_agent" """Create GenAI agent.""" INVOKE_AGENT = "invoke_agent" """Invoke GenAI agent.""" EXECUTE_TOOL = "execute_tool" """Execute a tool.""" class GenAiOutputTypeValues(Enum): TEXT = "text" """Plain text.""" JSON = "json" """JSON object with known or unknown schema.""" IMAGE = "image" """Image.""" SPEECH = "speech" """Speech.""" class GenAiProviderNameValues(Enum): OPENAI = "openai" """[OpenAI](https://openai.com/).""" GCP_GEN_AI = "gcp.gen_ai" """Any Google generative AI endpoint.""" GCP_VERTEX_AI = "gcp.vertex_ai" """[Vertex AI](https://cloud.google.com/vertex-ai).""" GCP_GEMINI = "gcp.gemini" """[Gemini](https://cloud.google.com/products/gemini).""" ANTHROPIC = "anthropic" """[Anthropic](https://www.anthropic.com/).""" COHERE = "cohere" """[Cohere](https://cohere.com/).""" AZURE_AI_INFERENCE = "azure.ai.inference" """Azure AI Inference.""" AZURE_AI_OPENAI = "azure.ai.openai" """[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/).""" IBM_WATSONX_AI = "ibm.watsonx.ai" """[IBM Watsonx AI](https://www.ibm.com/products/watsonx-ai).""" AWS_BEDROCK = "aws.bedrock" """[AWS Bedrock](https://aws.amazon.com/bedrock).""" PERPLEXITY = "perplexity" """[Perplexity](https://www.perplexity.ai/).""" X_AI = "x_ai" """[xAI](https://x.ai/).""" DEEPSEEK = "deepseek" """[DeepSeek](https://www.deepseek.com/).""" GROQ = "groq" """[Groq](https://groq.com/).""" MISTRAL_AI = "mistral_ai" """[Mistral AI](https://mistral.ai/).""" @deprecated( "The attribute gen_ai.system is deprecated - Replaced by `gen_ai.provider.name`" ) class GenAiSystemValues(Enum): OPENAI = "openai" """OpenAI.""" GCP_GEN_AI = "gcp.gen_ai" """Any Google generative AI endpoint.""" GCP_VERTEX_AI = "gcp.vertex_ai" """Vertex AI.""" GCP_GEMINI = "gcp.gemini" """Gemini.""" VERTEX_AI = "vertex_ai" """Deprecated: Replaced by `gcp.vertex_ai`.""" GEMINI = "gemini" """Deprecated: Replaced by `gcp.gemini`.""" ANTHROPIC = "anthropic" """Anthropic.""" COHERE = "cohere" """Cohere.""" AZ_AI_INFERENCE = "az.ai.inference" """Deprecated: Replaced by `azure.ai.inference`.""" AZ_AI_OPENAI = "az.ai.openai" """Deprecated: Replaced by `azure.ai.openai`.""" AZURE_AI_INFERENCE = "azure.ai.inference" """Azure 
AI Inference.""" AZURE_AI_OPENAI = "azure.ai.openai" """Azure OpenAI.""" IBM_WATSONX_AI = "ibm.watsonx.ai" """IBM Watsonx AI.""" AWS_BEDROCK = "aws.bedrock" """AWS Bedrock.""" PERPLEXITY = "perplexity" """Perplexity.""" XAI = "xai" """xAI.""" DEEPSEEK = "deepseek" """DeepSeek.""" GROQ = "groq" """Groq.""" MISTRAL_AI = "mistral_ai" """Mistral AI.""" class GenAiTokenTypeValues(Enum): INPUT = "input" """Input tokens (prompt, input, etc.).""" COMPLETION = "output" """Deprecated: Replaced by `output`.""" OUTPUT = "output" """Output tokens (completion, response, etc.).""" geo_attributes.py000066400000000000000000000036651511654350100422130ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final GEO_CONTINENT_CODE: Final = "geo.continent.code" """ Two-letter code representing continent’s name. """ GEO_COUNTRY_ISO_CODE: Final = "geo.country.iso_code" """ Two-letter ISO Country Code ([ISO 3166-1 alpha2](https://wikipedia.org/wiki/ISO_3166-1#Codes)). """ GEO_LOCALITY_NAME: Final = "geo.locality.name" """ Locality name. Represents the name of a city, town, village, or similar populated place. """ GEO_LOCATION_LAT: Final = "geo.location.lat" """ Latitude of the geo location in [WGS84](https://wikipedia.org/wiki/World_Geodetic_System#WGS84). """ GEO_LOCATION_LON: Final = "geo.location.lon" """ Longitude of the geo location in [WGS84](https://wikipedia.org/wiki/World_Geodetic_System#WGS84). """ GEO_POSTAL_CODE: Final = "geo.postal_code" """ Postal code associated with the location. Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. """ GEO_REGION_ISO_CODE: Final = "geo.region.iso_code" """ Region ISO code ([ISO 3166-2](https://wikipedia.org/wiki/ISO_3166-2)). """ class GeoContinentCodeValues(Enum): AF = "AF" """Africa.""" AN = "AN" """Antarctica.""" AS = "AS" """Asia.""" EU = "EU" """Europe.""" NA = "NA" """North America.""" OC = "OC" """Oceania.""" SA = "SA" """South America.""" graphql_attributes.py000066400000000000000000000022751511654350100430730ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
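# Illustrative usage (not part of the generated module; the span and the
# values below are hypothetical): a GraphQL server instrumentation might
# record the attributes defined in this module on its execution span, e.g.
#
#     span.set_attribute(GRAPHQL_OPERATION_TYPE, GraphqlOperationTypeValues.QUERY.value)
#     span.set_attribute(GRAPHQL_OPERATION_NAME, "findBookById")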
from enum import Enum from typing import Final GRAPHQL_DOCUMENT: Final = "graphql.document" """ The GraphQL document being executed. Note: The value may be sanitized to exclude sensitive information. """ GRAPHQL_OPERATION_NAME: Final = "graphql.operation.name" """ The name of the operation being executed. """ GRAPHQL_OPERATION_TYPE: Final = "graphql.operation.type" """ The type of the operation being executed. """ class GraphqlOperationTypeValues(Enum): QUERY = "query" """GraphQL query.""" MUTATION = "mutation" """GraphQL mutation.""" SUBSCRIPTION = "subscription" """GraphQL subscription.""" heroku_attributes.py000066400000000000000000000016351511654350100427310ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final HEROKU_APP_ID: Final = "heroku.app.id" """ Unique identifier for the application. """ HEROKU_RELEASE_COMMIT: Final = "heroku.release.commit" """ Commit hash for the current release. """ HEROKU_RELEASE_CREATION_TIMESTAMP: Final = "heroku.release.creation_timestamp" """ Time and date the release was created. """ host_attributes.py000066400000000000000000000070311511654350100424050ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final HOST_ARCH: Final = "host.arch" """ The CPU architecture the host system is running on. """ HOST_CPU_CACHE_L2_SIZE: Final = "host.cpu.cache.l2.size" """ The amount of level 2 memory cache available to the processor (in Bytes). """ HOST_CPU_FAMILY: Final = "host.cpu.family" """ Family or generation of the CPU. """ HOST_CPU_MODEL_ID: Final = "host.cpu.model.id" """ Model identifier. It provides more granular information about the CPU, distinguishing it from other CPUs within the same family. """ HOST_CPU_MODEL_NAME: Final = "host.cpu.model.name" """ Model designation of the processor. """ HOST_CPU_STEPPING: Final = "host.cpu.stepping" """ Stepping or core revisions. """ HOST_CPU_VENDOR_ID: Final = "host.cpu.vendor.id" """ Processor manufacturer identifier. A maximum 12-character string. Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor ID string in EBX, EDX and ECX registers. Writing these to memory in this order results in a 12-character string. 
""" HOST_ID: Final = "host.id" """ Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider. For non-containerized systems, this should be the `machine-id`. See the table below for the sources to use to determine the `machine-id` based on operating system. """ HOST_IMAGE_ID: Final = "host.image.id" """ VM image ID or host OS image ID. For Cloud, this value is from the provider. """ HOST_IMAGE_NAME: Final = "host.image.name" """ Name of the VM image or OS install the host was instantiated from. """ HOST_IMAGE_VERSION: Final = "host.image.version" """ The version string of the VM image or host OS as defined in [Version Attributes](/docs/resource/README.md#version-attributes). """ HOST_IP: Final = "host.ip" """ Available IP addresses of the host, excluding loopback interfaces. Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses MUST be specified in the [RFC 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. """ HOST_MAC: Final = "host.mac" """ Available MAC addresses of the host, excluding loopback interfaces. Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): as hyphen-separated octets in uppercase hexadecimal form from most to least significant. """ HOST_NAME: Final = "host.name" """ Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user. """ HOST_TYPE: Final = "host.type" """ Type of host. For Cloud, this must be the machine type. """ class HostArchValues(Enum): AMD64 = "amd64" """AMD64.""" ARM32 = "arm32" """ARM32.""" ARM64 = "arm64" """ARM64.""" IA64 = "ia64" """Itanium.""" PPC32 = "ppc32" """32-bit PowerPC.""" PPC64 = "ppc64" """64-bit PowerPC.""" S390X = "s390x" """IBM z/Architecture.""" X86 = "x86" """32-bit x86.""" http_attributes.py000066400000000000000000000161441511654350100424140ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated HTTP_CLIENT_IP: Final = "http.client_ip" """ Deprecated: Replaced by `client.address`. """ HTTP_CONNECTION_STATE: Final = "http.connection.state" """ State of the HTTP connection in the HTTP connection pool. """ HTTP_FLAVOR: Final = "http.flavor" """ Deprecated: Split into `network.protocol.name` and `network.protocol.version`. """ HTTP_HOST: Final = "http.host" """ Deprecated: Replaced by one of `server.address`, `client.address` or `http.request.header.host`, depending on the usage. """ HTTP_METHOD: Final = "http.method" """ Deprecated: Replaced by `http.request.method`. """ HTTP_REQUEST_BODY_SIZE: Final = "http.request.body.size" """ The size of the request payload body in bytes. 
This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. """ HTTP_REQUEST_HEADER_TEMPLATE: Final = "http.request.header" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_REQUEST_HEADER_TEMPLATE`. """ HTTP_REQUEST_METHOD: Final = "http.request.method" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_REQUEST_METHOD`. """ HTTP_REQUEST_METHOD_ORIGINAL: Final = "http.request.method_original" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_REQUEST_METHOD_ORIGINAL`. """ HTTP_REQUEST_RESEND_COUNT: Final = "http.request.resend_count" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_REQUEST_RESEND_COUNT`. """ HTTP_REQUEST_SIZE: Final = "http.request.size" """ The total size of the request in bytes. This should be the total number of bytes sent over the wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request body if any. """ HTTP_REQUEST_CONTENT_LENGTH: Final = "http.request_content_length" """ Deprecated: Replaced by `http.request.header.content-length`. """ HTTP_REQUEST_CONTENT_LENGTH_UNCOMPRESSED: Final = ( "http.request_content_length_uncompressed" ) """ Deprecated: Replaced by `http.request.body.size`. """ HTTP_RESPONSE_BODY_SIZE: Final = "http.response.body.size" """ The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. """ HTTP_RESPONSE_HEADER_TEMPLATE: Final = "http.response.header" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_RESPONSE_HEADER_TEMPLATE`. """ HTTP_RESPONSE_SIZE: Final = "http.response.size" """ The total size of the response in bytes. This should be the total number of bytes sent over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and response body and trailers if any. """ HTTP_RESPONSE_STATUS_CODE: Final = "http.response.status_code" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_RESPONSE_STATUS_CODE`. """ HTTP_RESPONSE_CONTENT_LENGTH: Final = "http.response_content_length" """ Deprecated: Replaced by `http.response.header.content-length`. """ HTTP_RESPONSE_CONTENT_LENGTH_UNCOMPRESSED: Final = ( "http.response_content_length_uncompressed" ) """ Deprecated: Replaced by `http.response.body.size`. """ HTTP_ROUTE: Final = "http.route" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HTTP_ROUTE`. """ HTTP_SCHEME: Final = "http.scheme" """ Deprecated: Replaced by `url.scheme`. """ HTTP_SERVER_NAME: Final = "http.server_name" """ Deprecated: Replaced by `server.address`. """ HTTP_STATUS_CODE: Final = "http.status_code" """ Deprecated: Replaced by `http.response.status_code`. """ HTTP_TARGET: Final = "http.target" """ Deprecated: Split to `url.path` and `url.query`. """ HTTP_URL: Final = "http.url" """ Deprecated: Replaced by `url.full`. 
""" HTTP_USER_AGENT: Final = "http.user_agent" """ Deprecated: Replaced by `user_agent.original`. """ class HttpConnectionStateValues(Enum): ACTIVE = "active" """active state.""" IDLE = "idle" """idle state.""" @deprecated( "The attribute http.flavor is deprecated - Split into `network.protocol.name` and `network.protocol.version`" ) class HttpFlavorValues(Enum): HTTP_1_0 = "1.0" """HTTP/1.0.""" HTTP_1_1 = "1.1" """HTTP/1.1.""" HTTP_2_0 = "2.0" """HTTP/2.""" HTTP_3_0 = "3.0" """HTTP/3.""" SPDY = "SPDY" """SPDY protocol.""" QUIC = "QUIC" """QUIC protocol.""" @deprecated( "Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues`." ) class HttpRequestMethodValues(Enum): CONNECT = "CONNECT" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.CONNECT`.""" DELETE = "DELETE" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.DELETE`.""" GET = "GET" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.GET`.""" HEAD = "HEAD" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.HEAD`.""" OPTIONS = "OPTIONS" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.OPTIONS`.""" PATCH = "PATCH" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.PATCH`.""" POST = "POST" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.POST`.""" PUT = "PUT" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.PUT`.""" TRACE = "TRACE" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.TRACE`.""" QUERY = "QUERY" """QUERY method.""" OTHER = "_OTHER" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.http_attributes.HttpRequestMethodValues.OTHER`.""" hw_attributes.py000066400000000000000000000133731511654350100420540ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final HW_BATTERY_CAPACITY: Final = "hw.battery.capacity" """ Design capacity in Watts-hours or Amper-hours. """ HW_BATTERY_CHEMISTRY: Final = "hw.battery.chemistry" """ Battery [chemistry](https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html), e.g. Lithium-Ion, Nickel-Cadmium, etc. """ HW_BATTERY_STATE: Final = "hw.battery.state" """ The current state of the battery. """ HW_BIOS_VERSION: Final = "hw.bios_version" """ BIOS version of the hardware component. 
""" HW_DRIVER_VERSION: Final = "hw.driver_version" """ Driver version for the hardware component. """ HW_ENCLOSURE_TYPE: Final = "hw.enclosure.type" """ Type of the enclosure (useful for modular systems). """ HW_FIRMWARE_VERSION: Final = "hw.firmware_version" """ Firmware version of the hardware component. """ HW_GPU_TASK: Final = "hw.gpu.task" """ Type of task the GPU is performing. """ HW_ID: Final = "hw.id" """ An identifier for the hardware component, unique within the monitored host. """ HW_LIMIT_TYPE: Final = "hw.limit_type" """ Type of limit for hardware components. """ HW_LOGICAL_DISK_RAID_LEVEL: Final = "hw.logical_disk.raid_level" """ RAID Level of the logical disk. """ HW_LOGICAL_DISK_STATE: Final = "hw.logical_disk.state" """ State of the logical disk space usage. """ HW_MEMORY_TYPE: Final = "hw.memory.type" """ Type of the memory module. """ HW_MODEL: Final = "hw.model" """ Descriptive model name of the hardware component. """ HW_NAME: Final = "hw.name" """ An easily-recognizable name for the hardware component. """ HW_NETWORK_LOGICAL_ADDRESSES: Final = "hw.network.logical_addresses" """ Logical addresses of the adapter (e.g. IP address, or WWPN). """ HW_NETWORK_PHYSICAL_ADDRESS: Final = "hw.network.physical_address" """ Physical address of the adapter (e.g. MAC address, or WWNN). """ HW_PARENT: Final = "hw.parent" """ Unique identifier of the parent component (typically the `hw.id` attribute of the enclosure, or disk controller). """ HW_PHYSICAL_DISK_SMART_ATTRIBUTE: Final = "hw.physical_disk.smart_attribute" """ [S.M.A.R.T.](https://wikipedia.org/wiki/S.M.A.R.T.) (Self-Monitoring, Analysis, and Reporting Technology) attribute of the physical disk. """ HW_PHYSICAL_DISK_STATE: Final = "hw.physical_disk.state" """ State of the physical disk endurance utilization. """ HW_PHYSICAL_DISK_TYPE: Final = "hw.physical_disk.type" """ Type of the physical disk. """ HW_SENSOR_LOCATION: Final = "hw.sensor_location" """ Location of the sensor. """ HW_SERIAL_NUMBER: Final = "hw.serial_number" """ Serial number of the hardware component. """ HW_STATE: Final = "hw.state" """ The current state of the component. """ HW_TAPE_DRIVE_OPERATION_TYPE: Final = "hw.tape_drive.operation_type" """ Type of tape drive operation. """ HW_TYPE: Final = "hw.type" """ Type of the component. Note: Describes the category of the hardware component for which `hw.state` is being reported. For example, `hw.type=temperature` along with `hw.state=degraded` would indicate that the temperature of the hardware component has been reported as `degraded`. """ HW_VENDOR: Final = "hw.vendor" """ Vendor name of the hardware component. 
""" class HwBatteryStateValues(Enum): CHARGING = "charging" """Charging.""" DISCHARGING = "discharging" """Discharging.""" class HwGpuTaskValues(Enum): DECODER = "decoder" """Decoder.""" ENCODER = "encoder" """Encoder.""" GENERAL = "general" """General.""" class HwLimitTypeValues(Enum): CRITICAL = "critical" """Critical.""" DEGRADED = "degraded" """Degraded.""" HIGH_CRITICAL = "high.critical" """High Critical.""" HIGH_DEGRADED = "high.degraded" """High Degraded.""" LOW_CRITICAL = "low.critical" """Low Critical.""" LOW_DEGRADED = "low.degraded" """Low Degraded.""" MAX = "max" """Maximum.""" THROTTLED = "throttled" """Throttled.""" TURBO = "turbo" """Turbo.""" class HwLogicalDiskStateValues(Enum): USED = "used" """Used.""" FREE = "free" """Free.""" class HwPhysicalDiskStateValues(Enum): REMAINING = "remaining" """Remaining.""" class HwStateValues(Enum): DEGRADED = "degraded" """Degraded.""" FAILED = "failed" """Failed.""" NEEDS_CLEANING = "needs_cleaning" """Needs Cleaning.""" OK = "ok" """OK.""" PREDICTED_FAILURE = "predicted_failure" """Predicted Failure.""" class HwTapeDriveOperationTypeValues(Enum): MOUNT = "mount" """Mount.""" UNMOUNT = "unmount" """Unmount.""" CLEAN = "clean" """Clean.""" class HwTypeValues(Enum): BATTERY = "battery" """Battery.""" CPU = "cpu" """CPU.""" DISK_CONTROLLER = "disk_controller" """Disk controller.""" ENCLOSURE = "enclosure" """Enclosure.""" FAN = "fan" """Fan.""" GPU = "gpu" """GPU.""" LOGICAL_DISK = "logical_disk" """Logical disk.""" MEMORY = "memory" """Memory.""" NETWORK = "network" """Network.""" PHYSICAL_DISK = "physical_disk" """Physical disk.""" POWER_SUPPLY = "power_supply" """Power supply.""" TAPE_DRIVE = "tape_drive" """Tape drive.""" TEMPERATURE = "temperature" """Temperature.""" VOLTAGE = "voltage" """Voltage.""" k8s_attributes.py000066400000000000000000000541101511654350100421350ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final K8S_CLUSTER_NAME: Final = "k8s.cluster.name" """ The name of the cluster. """ K8S_CLUSTER_UID: Final = "k8s.cluster.uid" """ A pseudo-ID for the cluster, set to the UID of the `kube-system` namespace. Note: K8s doesn't have support for obtaining a cluster ID. If this is ever added, we will recommend collecting the `k8s.cluster.uid` through the official APIs. In the meantime, we are able to use the `uid` of the `kube-system` namespace as a proxy for cluster ID. Read on for the rationale. Every object created in a K8s cluster is assigned a distinct UID. The `kube-system` namespace is used by Kubernetes itself and will exist for the lifetime of the cluster. Using the `uid` of the `kube-system` namespace is a reasonable proxy for the K8s ClusterID as it will only change if the cluster is rebuilt. 
Furthermore, Kubernetes UIDs are UUIDs as standardized by [ISO/IEC 9834-8 and ITU-T X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html), which states: > If generated according to one of the mechanisms defined in Rec. > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be > different from all other UUIDs generated before 3603 A.D., or is > extremely likely to be different (depending on the mechanism chosen). Therefore, UIDs between clusters should be extremely unlikely to conflict. """ K8S_CONTAINER_NAME: Final = "k8s.container.name" """ The name of the Container from Pod specification, must be unique within a Pod. Container runtime usually uses a different globally unique name (`container.name`). """ K8S_CONTAINER_RESTART_COUNT: Final = "k8s.container.restart_count" """ Number of times the container was restarted. This attribute can be used to identify a particular container (running or stopped) within a container spec. """ K8S_CONTAINER_STATUS_LAST_TERMINATED_REASON: Final = ( "k8s.container.status.last_terminated_reason" ) """ Last terminated reason of the Container. """ K8S_CONTAINER_STATUS_REASON: Final = "k8s.container.status.reason" """ The reason for the container state. Corresponds to the `reason` field of the [K8s ContainerStateWaiting](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core) or [K8s ContainerStateTerminated](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core). """ K8S_CONTAINER_STATUS_STATE: Final = "k8s.container.status.state" """ The state of the container. [K8s ContainerState](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core). """ K8S_CRONJOB_ANNOTATION_TEMPLATE: Final = "k8s.cronjob.annotation" """ The cronjob annotation placed on the CronJob, the `<key>` being the annotation name, the value being the annotation value. Note: Examples: - An annotation `retries` with value `4` SHOULD be recorded as the `k8s.cronjob.annotation.retries` attribute with value `"4"`. - An annotation `data` with empty string value SHOULD be recorded as the `k8s.cronjob.annotation.data` attribute with value `""`. """ K8S_CRONJOB_LABEL_TEMPLATE: Final = "k8s.cronjob.label" """ The label placed on the CronJob, the `<key>` being the label name, the value being the label value. Note: Examples: - A label `type` with value `weekly` SHOULD be recorded as the `k8s.cronjob.label.type` attribute with value `"weekly"`. - A label `automated` with empty string value SHOULD be recorded as the `k8s.cronjob.label.automated` attribute with value `""`. """ K8S_CRONJOB_NAME: Final = "k8s.cronjob.name" """ The name of the CronJob. """ K8S_CRONJOB_UID: Final = "k8s.cronjob.uid" """ The UID of the CronJob. """ K8S_DAEMONSET_ANNOTATION_TEMPLATE: Final = "k8s.daemonset.annotation" """ The annotation placed on the DaemonSet, the `<key>` being the annotation name, the value being the annotation value, even if the value is empty. Note: Examples: - An annotation `replicas` with value `1` SHOULD be recorded as the `k8s.daemonset.annotation.replicas` attribute with value `"1"`. - An annotation `data` with empty string value SHOULD be recorded as the `k8s.daemonset.annotation.data` attribute with value `""`. """ K8S_DAEMONSET_LABEL_TEMPLATE: Final = "k8s.daemonset.label" """ The label placed on the DaemonSet, the `<key>` being the label name, the value being the label value, even if the value is empty.
Note: Examples: - A label `app` with value `guestbook` SHOULD be recorded as the `k8s.daemonset.label.app` attribute with value `"guestbook"`. - A label `data` with empty string value SHOULD be recorded as the `k8s.daemonset.label.data` attribute with value `""`. """ K8S_DAEMONSET_NAME: Final = "k8s.daemonset.name" """ The name of the DaemonSet. """ K8S_DAEMONSET_UID: Final = "k8s.daemonset.uid" """ The UID of the DaemonSet. """ K8S_DEPLOYMENT_ANNOTATION_TEMPLATE: Final = "k8s.deployment.annotation" """ The annotation placed on the Deployment, the `<key>` being the annotation name, the value being the annotation value, even if the value is empty. Note: Examples: - An annotation `replicas` with value `1` SHOULD be recorded as the `k8s.deployment.annotation.replicas` attribute with value `"1"`. - An annotation `data` with empty string value SHOULD be recorded as the `k8s.deployment.annotation.data` attribute with value `""`. """ K8S_DEPLOYMENT_LABEL_TEMPLATE: Final = "k8s.deployment.label" """ The label placed on the Deployment, the `<key>` being the label name, the value being the label value, even if the value is empty. Note: Examples: - A label `app` with value `guestbook` SHOULD be recorded as the `k8s.deployment.label.app` attribute with value `"guestbook"`. - A label `injected` with empty string value SHOULD be recorded as the `k8s.deployment.label.injected` attribute with value `""`. """ K8S_DEPLOYMENT_NAME: Final = "k8s.deployment.name" """ The name of the Deployment. """ K8S_DEPLOYMENT_UID: Final = "k8s.deployment.uid" """ The UID of the Deployment. """ K8S_HPA_METRIC_TYPE: Final = "k8s.hpa.metric.type" """ The type of metric source for the horizontal pod autoscaler. Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. """ K8S_HPA_NAME: Final = "k8s.hpa.name" """ The name of the horizontal pod autoscaler. """ K8S_HPA_SCALETARGETREF_API_VERSION: Final = ( "k8s.hpa.scaletargetref.api_version" ) """ The API version of the target resource to scale for the HorizontalPodAutoscaler. Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA spec. """ K8S_HPA_SCALETARGETREF_KIND: Final = "k8s.hpa.scaletargetref.kind" """ The kind of the target resource to scale for the HorizontalPodAutoscaler. Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. """ K8S_HPA_SCALETARGETREF_NAME: Final = "k8s.hpa.scaletargetref.name" """ The name of the target resource to scale for the HorizontalPodAutoscaler. Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. """ K8S_HPA_UID: Final = "k8s.hpa.uid" """ The UID of the horizontal pod autoscaler. """ K8S_HUGEPAGE_SIZE: Final = "k8s.hugepage.size" """ The size (identifier) of the K8s huge page. """ K8S_JOB_ANNOTATION_TEMPLATE: Final = "k8s.job.annotation" """ The annotation placed on the Job, the `<key>` being the annotation name, the value being the annotation value, even if the value is empty. Note: Examples: - An annotation `number` with value `1` SHOULD be recorded as the `k8s.job.annotation.number` attribute with value `"1"`. - An annotation `data` with empty string value SHOULD be recorded as the `k8s.job.annotation.data` attribute with value `""`. """ K8S_JOB_LABEL_TEMPLATE: Final = "k8s.job.label" """ The label placed on the Job, the `<key>` being the label name, the value being the label value, even if the value is empty. Note: Examples: - A label `jobtype` with value `ci` SHOULD be recorded as the `k8s.job.label.jobtype` attribute with value `"ci"`. - A label `data` with empty string value SHOULD be recorded as the `k8s.job.label.data` attribute with value `""`. """
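# Illustrative sketch (not part of the generated conventions; the attribute
# dict below is hypothetical): template attributes such as
# `k8s.job.label.<key>` are formed by appending the label or annotation key
# to the template constant, for example:
#
#     attributes = {f"{K8S_JOB_LABEL_TEMPLATE}.jobtype": "ci"}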
K8S_JOB_NAME: Final = "k8s.job.name" """ The name of the Job. """ K8S_JOB_UID: Final = "k8s.job.uid" """ The UID of the Job. """ K8S_NAMESPACE_ANNOTATION_TEMPLATE: Final = "k8s.namespace.annotation" """ The annotation placed on the Namespace, the `<key>` being the annotation name, the value being the annotation value, even if the value is empty. Note: Examples: - An annotation `ttl` with value `0` SHOULD be recorded as the `k8s.namespace.annotation.ttl` attribute with value `"0"`. - An annotation `data` with empty string value SHOULD be recorded as the `k8s.namespace.annotation.data` attribute with value `""`. """ K8S_NAMESPACE_LABEL_TEMPLATE: Final = "k8s.namespace.label" """ The label placed on the Namespace, the `<key>` being the label name, the value being the label value, even if the value is empty. Note: Examples: - A label `kubernetes.io/metadata.name` with value `default` SHOULD be recorded as the `k8s.namespace.label.kubernetes.io/metadata.name` attribute with value `"default"`. - A label `data` with empty string value SHOULD be recorded as the `k8s.namespace.label.data` attribute with value `""`. """ K8S_NAMESPACE_NAME: Final = "k8s.namespace.name" """ The name of the namespace that the pod is running in. """ K8S_NAMESPACE_PHASE: Final = "k8s.namespace.phase" """ The phase of the K8s namespace. Note: This attribute aligns with the `phase` field of the [K8s NamespaceStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core). """ K8S_NODE_ANNOTATION_TEMPLATE: Final = "k8s.node.annotation" """ The annotation placed on the Node, the `<key>` being the annotation name, the value being the annotation value, even if the value is empty. Note: Examples: - An annotation `node.alpha.kubernetes.io/ttl` with value `0` SHOULD be recorded as the `k8s.node.annotation.node.alpha.kubernetes.io/ttl` attribute with value `"0"`. - An annotation `data` with empty string value SHOULD be recorded as the `k8s.node.annotation.data` attribute with value `""`. """ K8S_NODE_CONDITION_STATUS: Final = "k8s.node.condition.status" """ The status of the condition, one of True, False, Unknown. Note: This attribute aligns with the `status` field of the [NodeCondition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core). """ K8S_NODE_CONDITION_TYPE: Final = "k8s.node.condition.type" """ The condition type of a K8s Node. Note: K8s Node conditions as described by [K8s documentation](https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition). This attribute aligns with the `type` field of the [NodeCondition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core). The set of possible values is not limited to those listed here. Managed Kubernetes environments or custom controllers MAY introduce additional node condition types. When this occurs, the exact value as reported by the Kubernetes API SHOULD be used. """ K8S_NODE_LABEL_TEMPLATE: Final = "k8s.node.label" """ The label placed on the Node, the `<key>` being the label name, the value being the label value, even if the value is empty. Note: Examples: - A label `kubernetes.io/arch` with value `arm64` SHOULD be recorded as the `k8s.node.label.kubernetes.io/arch` attribute with value `"arm64"`. - A label `data` with empty string value SHOULD be recorded as the `k8s.node.label.data` attribute with value `""`.
""" K8S_NODE_NAME: Final = "k8s.node.name" """ The name of the Node. """ K8S_NODE_UID: Final = "k8s.node.uid" """ The UID of the Node. """ K8S_POD_ANNOTATION_TEMPLATE: Final = "k8s.pod.annotation" """ The annotation placed on the Pod, the `` being the annotation name, the value being the annotation value. Note: Examples: - An annotation `kubernetes.io/enforce-mountable-secrets` with value `true` SHOULD be recorded as the `k8s.pod.annotation.kubernetes.io/enforce-mountable-secrets` attribute with value `"true"`. - An annotation `mycompany.io/arch` with value `x64` SHOULD be recorded as the `k8s.pod.annotation.mycompany.io/arch` attribute with value `"x64"`. - An annotation `data` with empty string value SHOULD be recorded as the `k8s.pod.annotation.data` attribute with value `""`. """ K8S_POD_LABEL_TEMPLATE: Final = "k8s.pod.label" """ The label placed on the Pod, the `` being the label name, the value being the label value. Note: Examples: - A label `app` with value `my-app` SHOULD be recorded as the `k8s.pod.label.app` attribute with value `"my-app"`. - A label `mycompany.io/arch` with value `x64` SHOULD be recorded as the `k8s.pod.label.mycompany.io/arch` attribute with value `"x64"`. - A label `data` with empty string value SHOULD be recorded as the `k8s.pod.label.data` attribute with value `""`. """ K8S_POD_LABELS_TEMPLATE: Final = "k8s.pod.labels" """ Deprecated: Replaced by `k8s.pod.label`. """ K8S_POD_NAME: Final = "k8s.pod.name" """ The name of the Pod. """ K8S_POD_STATUS_PHASE: Final = "k8s.pod.status.phase" """ The phase for the pod. Corresponds to the `phase` field of the: [K8s PodStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podstatus-v1-core). """ K8S_POD_STATUS_REASON: Final = "k8s.pod.status.reason" """ The reason for the pod state. Corresponds to the `reason` field of the: [K8s PodStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podstatus-v1-core). """ K8S_POD_UID: Final = "k8s.pod.uid" """ The UID of the Pod. """ K8S_REPLICASET_ANNOTATION_TEMPLATE: Final = "k8s.replicaset.annotation" """ The annotation placed on the ReplicaSet, the `` being the annotation name, the value being the annotation value, even if the value is empty. Note: Examples: - A label `replicas` with value `0` SHOULD be recorded as the `k8s.replicaset.annotation.replicas` attribute with value `"0"`. - A label `data` with empty string value SHOULD be recorded as the `k8s.replicaset.annotation.data` attribute with value `""`. """ K8S_REPLICASET_LABEL_TEMPLATE: Final = "k8s.replicaset.label" """ The label placed on the ReplicaSet, the `` being the label name, the value being the label value, even if the value is empty. Note: Examples: - A label `app` with value `guestbook` SHOULD be recorded as the `k8s.replicaset.label.app` attribute with value `"guestbook"`. - A label `injected` with empty string value SHOULD be recorded as the `k8s.replicaset.label.injected` attribute with value `""`. """ K8S_REPLICASET_NAME: Final = "k8s.replicaset.name" """ The name of the ReplicaSet. """ K8S_REPLICASET_UID: Final = "k8s.replicaset.uid" """ The UID of the ReplicaSet. """ K8S_REPLICATIONCONTROLLER_NAME: Final = "k8s.replicationcontroller.name" """ The name of the replication controller. """ K8S_REPLICATIONCONTROLLER_UID: Final = "k8s.replicationcontroller.uid" """ The UID of the replication controller. """ K8S_RESOURCEQUOTA_NAME: Final = "k8s.resourcequota.name" """ The name of the resource quota. 
""" K8S_RESOURCEQUOTA_RESOURCE_NAME: Final = "k8s.resourcequota.resource_name" """ The name of the K8s resource a resource quota defines. Note: The value for this attribute can be either the full `count/[.]` string (e.g., count/deployments.apps, count/pods), or, for certain core Kubernetes resources, just the resource name (e.g., pods, services, configmaps). Both forms are supported by Kubernetes for object count quotas. See [Kubernetes Resource Quotas documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#quota-on-object-count) for more details. """ K8S_RESOURCEQUOTA_UID: Final = "k8s.resourcequota.uid" """ The UID of the resource quota. """ K8S_STATEFULSET_ANNOTATION_TEMPLATE: Final = "k8s.statefulset.annotation" """ The annotation placed on the StatefulSet, the `` being the annotation name, the value being the annotation value, even if the value is empty. Note: Examples: - A label `replicas` with value `1` SHOULD be recorded as the `k8s.statefulset.annotation.replicas` attribute with value `"1"`. - A label `data` with empty string value SHOULD be recorded as the `k8s.statefulset.annotation.data` attribute with value `""`. """ K8S_STATEFULSET_LABEL_TEMPLATE: Final = "k8s.statefulset.label" """ The label placed on the StatefulSet, the `` being the label name, the value being the label value, even if the value is empty. Note: Examples: - A label `replicas` with value `0` SHOULD be recorded as the `k8s.statefulset.label.app` attribute with value `"guestbook"`. - A label `injected` with empty string value SHOULD be recorded as the `k8s.statefulset.label.injected` attribute with value `""`. """ K8S_STATEFULSET_NAME: Final = "k8s.statefulset.name" """ The name of the StatefulSet. """ K8S_STATEFULSET_UID: Final = "k8s.statefulset.uid" """ The UID of the StatefulSet. """ K8S_STORAGECLASS_NAME: Final = "k8s.storageclass.name" """ The name of K8s [StorageClass](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io) object. """ K8S_VOLUME_NAME: Final = "k8s.volume.name" """ The name of the K8s volume. """ K8S_VOLUME_TYPE: Final = "k8s.volume.type" """ The type of the K8s volume. 
""" class K8sContainerStatusReasonValues(Enum): CONTAINER_CREATING = "ContainerCreating" """The container is being created.""" CRASH_LOOP_BACK_OFF = "CrashLoopBackOff" """The container is in a crash loop back off state.""" CREATE_CONTAINER_CONFIG_ERROR = "CreateContainerConfigError" """There was an error creating the container configuration.""" ERR_IMAGE_PULL = "ErrImagePull" """There was an error pulling the container image.""" IMAGE_PULL_BACK_OFF = "ImagePullBackOff" """The container image pull is in back off state.""" OOM_KILLED = "OOMKilled" """The container was killed due to out of memory.""" COMPLETED = "Completed" """The container has completed execution.""" ERROR = "Error" """There was an error with the container.""" CONTAINER_CANNOT_RUN = "ContainerCannotRun" """The container cannot run.""" class K8sContainerStatusStateValues(Enum): TERMINATED = "terminated" """The container has terminated.""" RUNNING = "running" """The container is running.""" WAITING = "waiting" """The container is waiting.""" class K8sNamespacePhaseValues(Enum): ACTIVE = "active" """Active namespace phase as described by [K8s API](https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase).""" TERMINATING = "terminating" """Terminating namespace phase as described by [K8s API](https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase).""" class K8sNodeConditionStatusValues(Enum): CONDITION_TRUE = "true" """condition_true.""" CONDITION_FALSE = "false" """condition_false.""" CONDITION_UNKNOWN = "unknown" """condition_unknown.""" class K8sNodeConditionTypeValues(Enum): READY = "Ready" """The node is healthy and ready to accept pods.""" DISK_PRESSURE = "DiskPressure" """Pressure exists on the disk size—that is, if the disk capacity is low.""" MEMORY_PRESSURE = "MemoryPressure" """Pressure exists on the node memory—that is, if the node memory is low.""" PID_PRESSURE = "PIDPressure" """Pressure exists on the processes—that is, if there are too many processes on the node.""" NETWORK_UNAVAILABLE = "NetworkUnavailable" """The network for the node is not correctly configured.""" class K8sPodStatusPhaseValues(Enum): PENDING = "Pending" """The pod has been accepted by the system, but one or more of the containers has not been started. This includes time before being bound to a node, as well as time spent pulling images onto the host.""" RUNNING = "Running" """The pod has been bound to a node and all of the containers have been started. 
At least one container is still running or is in the process of being restarted.""" SUCCEEDED = "Succeeded" """All containers in the pod have voluntarily terminated with a container exit code of 0, and the system is not going to restart any of these containers.""" FAILED = "Failed" """All containers in the pod have terminated, and at least one container has terminated in a failure (exited with a non-zero exit code or was stopped by the system).""" UNKNOWN = "Unknown" """For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.""" class K8sPodStatusReasonValues(Enum): EVICTED = "Evicted" """The pod is evicted.""" NODE_AFFINITY = "NodeAffinity" """The pod is in a status because of its node affinity.""" NODE_LOST = "NodeLost" """The reason on a pod when its state cannot be confirmed as kubelet is unresponsive on the node it is (was) running.""" SHUTDOWN = "Shutdown" """The node is shutdown.""" UNEXPECTED_ADMISSION_ERROR = "UnexpectedAdmissionError" """The pod was rejected admission to the node because of an error during admission that could not be categorized.""" class K8sVolumeTypeValues(Enum): PERSISTENT_VOLUME_CLAIM = "persistentVolumeClaim" """A [persistentVolumeClaim](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim) volume.""" CONFIG_MAP = "configMap" """A [configMap](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap) volume.""" DOWNWARD_API = "downwardAPI" """A [downwardAPI](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi) volume.""" EMPTY_DIR = "emptyDir" """An [emptyDir](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume.""" SECRET = "secret" """A [secret](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret) volume.""" LOCAL = "local" """A [local](https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local) volume.""" linux_attributes.py000066400000000000000000000015671511654350100425770ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final LINUX_MEMORY_SLAB_STATE: Final = "linux.memory.slab.state" """ The Linux Slab memory state. """ class LinuxMemorySlabStateValues(Enum): RECLAIMABLE = "reclaimable" """reclaimable.""" UNRECLAIMABLE = "unreclaimable" """unreclaimable.""" log_attributes.py000066400000000000000000000040511511654350100422100ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final LOG_FILE_NAME: Final = "log.file.name" """ The basename of the file. """ LOG_FILE_NAME_RESOLVED: Final = "log.file.name_resolved" """ The basename of the file, with symlinks resolved. """ LOG_FILE_PATH: Final = "log.file.path" """ The full path to the file. """ LOG_FILE_PATH_RESOLVED: Final = "log.file.path_resolved" """ The full path to the file, with symlinks resolved. """ LOG_IOSTREAM: Final = "log.iostream" """ The stream associated with the log. See below for a list of well-known values. """ LOG_RECORD_ORIGINAL: Final = "log.record.original" """ The complete original Log Record. Note: This value MAY be added when processing a Log Record which was originally transmitted as a string or equivalent data type AND the Body field of the Log Record does not contain the same value (e.g. a syslog or a log record read from a file). """ LOG_RECORD_UID: Final = "log.record.uid" """ A unique identifier for the Log Record. Note: If an id is provided, other log records with the same id will be considered duplicates and can be removed safely. This means that two distinguishable log records MUST have different values. The id MAY be a [Universally Unique Lexicographically Sortable Identifier (ULID)](https://github.com/ulid/spec), but other identifiers (e.g. UUID) may be used as needed. """ class LogIostreamValues(Enum): STDOUT = "stdout" """Logs from stdout stream.""" STDERR = "stderr" """Logs from stderr stream.""" mainframe_attributes.py000066400000000000000000000013641511654350100433720ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final MAINFRAME_LPAR_NAME: Final = "mainframe.lpar.name" """ Name of the logical partition that hosts a system with a mainframe operating system. """ message_attributes.py000066400000000000000000000024341511654350100430560ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated MESSAGE_COMPRESSED_SIZE: Final = "message.compressed_size" """ Deprecated: Replaced by `rpc.message.compressed_size`. """ MESSAGE_ID: Final = "message.id" """ Deprecated: Replaced by `rpc.message.id`. """ MESSAGE_TYPE: Final = "message.type" """ Deprecated: Replaced by `rpc.message.type`. """ MESSAGE_UNCOMPRESSED_SIZE: Final = "message.uncompressed_size" """ Deprecated: Replaced by `rpc.message.uncompressed_size`. """ @deprecated( "The attribute message.type is deprecated - Replaced by `rpc.message.type`" ) class MessageTypeValues(Enum): SENT = "SENT" """sent.""" RECEIVED = "RECEIVED" """received.""" messaging_attributes.py000066400000000000000000000307101511654350100434050ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final MESSAGING_BATCH_MESSAGE_COUNT: Final = "messaging.batch.message_count" """ The number of messages sent, received, or processed in the scope of the batching operation. Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on spans that operate with a single message. When a messaging client library supports both batch and single-message API for the same operation, instrumentations SHOULD use `messaging.batch.message_count` for batching APIs and SHOULD NOT use it for single-message APIs. """ MESSAGING_CLIENT_ID: Final = "messaging.client.id" """ A unique identifier for the client that consumes or produces a message. """ MESSAGING_CONSUMER_GROUP_NAME: Final = "messaging.consumer.group.name" """ The name of the consumer group with which a consumer is associated. Note: Semantic conventions for individual messaging systems SHOULD document whether `messaging.consumer.group.name` is applicable and what it means in the context of that system. """ MESSAGING_DESTINATION_ANONYMOUS: Final = "messaging.destination.anonymous" """ A boolean that is true if the message destination is anonymous (could be unnamed or have auto-generated name). """ MESSAGING_DESTINATION_NAME: Final = "messaging.destination.name" """ The message destination name. Note: Destination name SHOULD uniquely identify a specific queue, topic or other entity within the broker. If the broker doesn't have such notion, the destination name SHOULD uniquely identify the broker. """ MESSAGING_DESTINATION_PARTITION_ID: Final = ( "messaging.destination.partition.id" ) """ The identifier of the partition messages are sent to or received from, unique within the `messaging.destination.name`. """ MESSAGING_DESTINATION_SUBSCRIPTION_NAME: Final = ( "messaging.destination.subscription.name" ) """ The name of the destination subscription from which a message is consumed. 
Note: Semantic conventions for individual messaging systems SHOULD document whether `messaging.destination.subscription.name` is applicable and what it means in the context of that system. """ MESSAGING_DESTINATION_TEMPLATE: Final = "messaging.destination.template" """ Low cardinality representation of the messaging destination name. Note: Destination names could be constructed from templates. An example would be a destination name involving a user name or product id. Although the destination name in this case is of high cardinality, the underlying template is of low cardinality and can be effectively used for grouping and aggregation. """ MESSAGING_DESTINATION_TEMPORARY: Final = "messaging.destination.temporary" """ A boolean that is true if the message destination is temporary and might not exist anymore after messages are processed. """ MESSAGING_DESTINATION_PUBLISH_ANONYMOUS: Final = ( "messaging.destination_publish.anonymous" ) """ Deprecated: Removed. No replacement at this time. """ MESSAGING_DESTINATION_PUBLISH_NAME: Final = ( "messaging.destination_publish.name" ) """ Deprecated: Removed. No replacement at this time. """ MESSAGING_EVENTHUBS_CONSUMER_GROUP: Final = ( "messaging.eventhubs.consumer.group" ) """ Deprecated: Replaced by `messaging.consumer.group.name`. """ MESSAGING_EVENTHUBS_MESSAGE_ENQUEUED_TIME: Final = ( "messaging.eventhubs.message.enqueued_time" ) """ The UTC epoch seconds at which the message has been accepted and stored in the entity. """ MESSAGING_GCP_PUBSUB_MESSAGE_ACK_DEADLINE: Final = ( "messaging.gcp_pubsub.message.ack_deadline" ) """ The ack deadline in seconds set for the modify ack deadline request. """ MESSAGING_GCP_PUBSUB_MESSAGE_ACK_ID: Final = ( "messaging.gcp_pubsub.message.ack_id" ) """ The ack id for a given message. """ MESSAGING_GCP_PUBSUB_MESSAGE_DELIVERY_ATTEMPT: Final = ( "messaging.gcp_pubsub.message.delivery_attempt" ) """ The delivery attempt for a given message. """ MESSAGING_GCP_PUBSUB_MESSAGE_ORDERING_KEY: Final = ( "messaging.gcp_pubsub.message.ordering_key" ) """ The ordering key for a given message. If the attribute is not present, the message does not have an ordering key. """ MESSAGING_KAFKA_CONSUMER_GROUP: Final = "messaging.kafka.consumer.group" """ Deprecated: Replaced by `messaging.consumer.group.name`. """ MESSAGING_KAFKA_DESTINATION_PARTITION: Final = ( "messaging.kafka.destination.partition" ) """ Deprecated: Record string representation of the partition id in `messaging.destination.partition.id` attribute. """ MESSAGING_KAFKA_MESSAGE_KEY: Final = "messaging.kafka.message.key" """ Message keys in Kafka are used for grouping alike messages to ensure they're processed on the same partition. They differ from `messaging.message.id` in that they're not unique. If the key is `null`, the attribute MUST NOT be set. Note: If the key type is not string, its string representation has to be supplied for the attribute. If the key has no unambiguous, canonical string form, don't include its value. """ MESSAGING_KAFKA_MESSAGE_OFFSET: Final = "messaging.kafka.message.offset" """ Deprecated: Replaced by `messaging.kafka.offset`. """ MESSAGING_KAFKA_MESSAGE_TOMBSTONE: Final = "messaging.kafka.message.tombstone" """ A boolean that is true if the message is a tombstone. """ MESSAGING_KAFKA_OFFSET: Final = "messaging.kafka.offset" """ The offset of a record in the corresponding Kafka partition. """ MESSAGING_MESSAGE_BODY_SIZE: Final = "messaging.message.body.size" """ The size of the message body in bytes.
Note: This can refer to either the compressed or uncompressed body size. If both sizes are known, the uncompressed body size should be used. """ MESSAGING_MESSAGE_CONVERSATION_ID: Final = "messaging.message.conversation_id" """ The conversation ID identifying the conversation to which the message belongs, represented as a string. Sometimes called "Correlation ID". """ MESSAGING_MESSAGE_ENVELOPE_SIZE: Final = "messaging.message.envelope.size" """ The size of the message body and metadata in bytes. Note: This can refer to either the compressed or uncompressed size. If both sizes are known, the uncompressed size should be used. """ MESSAGING_MESSAGE_ID: Final = "messaging.message.id" """ A value used by the messaging system as an identifier for the message, represented as a string. """ MESSAGING_OPERATION: Final = "messaging.operation" """ Deprecated: Replaced by `messaging.operation.type`. """ MESSAGING_OPERATION_NAME: Final = "messaging.operation.name" """ The system-specific name of the messaging operation. """ MESSAGING_OPERATION_TYPE: Final = "messaging.operation.type" """ A string identifying the type of the messaging operation. Note: If a custom value is used, it MUST be of low cardinality. """ MESSAGING_RABBITMQ_DESTINATION_ROUTING_KEY: Final = ( "messaging.rabbitmq.destination.routing_key" ) """ RabbitMQ message routing key. """ MESSAGING_RABBITMQ_MESSAGE_DELIVERY_TAG: Final = ( "messaging.rabbitmq.message.delivery_tag" ) """ RabbitMQ message delivery tag. """ MESSAGING_ROCKETMQ_CLIENT_GROUP: Final = "messaging.rocketmq.client_group" """ Deprecated: Replaced by `messaging.consumer.group.name` on the consumer spans. No replacement for producer spans. """ MESSAGING_ROCKETMQ_CONSUMPTION_MODEL: Final = ( "messaging.rocketmq.consumption_model" ) """ Model of message consumption. This only applies to consumer spans. """ MESSAGING_ROCKETMQ_MESSAGE_DELAY_TIME_LEVEL: Final = ( "messaging.rocketmq.message.delay_time_level" ) """ The delay time level for delay message, which determines the message delay time. """ MESSAGING_ROCKETMQ_MESSAGE_DELIVERY_TIMESTAMP: Final = ( "messaging.rocketmq.message.delivery_timestamp" ) """ The timestamp in milliseconds that the delay message is expected to be delivered to the consumer. """ MESSAGING_ROCKETMQ_MESSAGE_GROUP: Final = "messaging.rocketmq.message.group" """ It is essential for FIFO messages. Messages that belong to the same message group are always processed one by one within the same consumer group. """ MESSAGING_ROCKETMQ_MESSAGE_KEYS: Final = "messaging.rocketmq.message.keys" """ Key(s) of the message, another way to mark the message besides the message id. """ MESSAGING_ROCKETMQ_MESSAGE_TAG: Final = "messaging.rocketmq.message.tag" """ The secondary classifier of the message besides the topic. """ MESSAGING_ROCKETMQ_MESSAGE_TYPE: Final = "messaging.rocketmq.message.type" """ Type of message. """ MESSAGING_ROCKETMQ_NAMESPACE: Final = "messaging.rocketmq.namespace" """ Namespace of RocketMQ resources; resources in different namespaces are independent of each other. """ MESSAGING_SERVICEBUS_DESTINATION_SUBSCRIPTION_NAME: Final = ( "messaging.servicebus.destination.subscription_name" ) """ Deprecated: Replaced by `messaging.destination.subscription.name`. """ MESSAGING_SERVICEBUS_DISPOSITION_STATUS: Final = ( "messaging.servicebus.disposition_status" ) """ Describes the [settlement type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
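
A minimal usage sketch (editorial illustration, not part of the generated constants; the span name and queue are hypothetical)::

    from opentelemetry import trace

    tracer = trace.get_tracer(__name__)
    # Record the settlement type on the span for a settle operation.
    with tracer.start_as_current_span("settle my-queue") as span:
        span.set_attribute(
            MESSAGING_SERVICEBUS_DISPOSITION_STATUS,
            MessagingServicebusDispositionStatusValues.COMPLETE.value,
        )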
""" MESSAGING_SERVICEBUS_MESSAGE_DELIVERY_COUNT: Final = ( "messaging.servicebus.message.delivery_count" ) """ Number of deliveries that have been attempted for this message. """ MESSAGING_SERVICEBUS_MESSAGE_ENQUEUED_TIME: Final = ( "messaging.servicebus.message.enqueued_time" ) """ The UTC epoch seconds at which the message has been accepted and stored in the entity. """ MESSAGING_SYSTEM: Final = "messaging.system" """ The messaging system as identified by the client instrumentation. Note: The actual messaging system may differ from the one known by the client. For example, when using Kafka client libraries to communicate with Azure Event Hubs, the `messaging.system` is set to `kafka` based on the instrumentation's best knowledge. """ class MessagingOperationTypeValues(Enum): CREATE = "create" """A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch sending scenarios.""" SEND = "send" """One or more messages are provided for sending to an intermediary. If a single message is sent, the context of the "Send" span can be used as the creation context and no "Create" span needs to be created.""" RECEIVE = "receive" """One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages.""" PROCESS = "process" """One or more messages are processed by a consumer.""" SETTLE = "settle" """One or more messages are settled.""" DELIVER = "deliver" """Deprecated: Replaced by `process`.""" PUBLISH = "publish" """Deprecated: Replaced by `send`.""" class MessagingRocketmqConsumptionModelValues(Enum): CLUSTERING = "clustering" """Clustering consumption model.""" BROADCASTING = "broadcasting" """Broadcasting consumption model.""" class MessagingRocketmqMessageTypeValues(Enum): NORMAL = "normal" """Normal message.""" FIFO = "fifo" """FIFO message.""" DELAY = "delay" """Delay message.""" TRANSACTION = "transaction" """Transaction message.""" class MessagingServicebusDispositionStatusValues(Enum): COMPLETE = "complete" """Message is completed.""" ABANDON = "abandon" """Message is abandoned.""" DEAD_LETTER = "dead_letter" """Message is sent to dead letter queue.""" DEFER = "defer" """Message is deferred.""" class MessagingSystemValues(Enum): ACTIVEMQ = "activemq" """Apache ActiveMQ.""" AWS_SNS = "aws.sns" """Amazon Simple Notification Service (SNS).""" AWS_SQS = "aws_sqs" """Amazon Simple Queue Service (SQS).""" EVENTGRID = "eventgrid" """Azure Event Grid.""" EVENTHUBS = "eventhubs" """Azure Event Hubs.""" SERVICEBUS = "servicebus" """Azure Service Bus.""" GCP_PUBSUB = "gcp_pubsub" """Google Cloud Pub/Sub.""" JMS = "jms" """Java Message Service.""" KAFKA = "kafka" """Apache Kafka.""" RABBITMQ = "rabbitmq" """RabbitMQ.""" ROCKETMQ = "rocketmq" """Apache RocketMQ.""" PULSAR = "pulsar" """Apache Pulsar.""" net_attributes.py000066400000000000000000000055751511654350100422310ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated NET_HOST_IP: Final = "net.host.ip" """ Deprecated: Replaced by `network.local.address`. """ NET_HOST_NAME: Final = "net.host.name" """ Deprecated: Replaced by `server.address`. """ NET_HOST_PORT: Final = "net.host.port" """ Deprecated: Replaced by `server.port`. """ NET_PEER_IP: Final = "net.peer.ip" """ Deprecated: Replaced by `network.peer.address`. """ NET_PEER_NAME: Final = "net.peer.name" """ Deprecated: Replaced by `server.address` on client spans and `client.address` on server spans. """ NET_PEER_PORT: Final = "net.peer.port" """ Deprecated: Replaced by `server.port` on client spans and `client.port` on server spans. """ NET_PROTOCOL_NAME: Final = "net.protocol.name" """ Deprecated: Replaced by `network.protocol.name`. """ NET_PROTOCOL_VERSION: Final = "net.protocol.version" """ Deprecated: Replaced by `network.protocol.version`. """ NET_SOCK_FAMILY: Final = "net.sock.family" """ Deprecated: Split to `network.transport` and `network.type`. """ NET_SOCK_HOST_ADDR: Final = "net.sock.host.addr" """ Deprecated: Replaced by `network.local.address`. """ NET_SOCK_HOST_PORT: Final = "net.sock.host.port" """ Deprecated: Replaced by `network.local.port`. """ NET_SOCK_PEER_ADDR: Final = "net.sock.peer.addr" """ Deprecated: Replaced by `network.peer.address`. """ NET_SOCK_PEER_NAME: Final = "net.sock.peer.name" """ Deprecated: Removed. No replacement at this time. """ NET_SOCK_PEER_PORT: Final = "net.sock.peer.port" """ Deprecated: Replaced by `network.peer.port`. """ NET_TRANSPORT: Final = "net.transport" """ Deprecated: Replaced by `network.transport`. """ @deprecated( "The attribute net.sock.family is deprecated - Split to `network.transport` and `network.type`" ) class NetSockFamilyValues(Enum): INET = "inet" """IPv4 address.""" INET6 = "inet6" """IPv6 address.""" UNIX = "unix" """Unix domain socket path.""" @deprecated( "The attribute net.transport is deprecated - Replaced by `network.transport`" ) class NetTransportValues(Enum): IP_TCP = "ip_tcp" """ip_tcp.""" IP_UDP = "ip_udp" """ip_udp.""" PIPE = "pipe" """Named or anonymous pipe.""" INPROC = "inproc" """In-process communication.""" OTHER = "other" """Something else (non IP-based).""" network_attributes.py000066400000000000000000000145411511654350100431250ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from enum import Enum from typing import Final from typing_extensions import deprecated NETWORK_CARRIER_ICC: Final = "network.carrier.icc" """ The ISO 3166-1 alpha-2 2-character country code associated with the mobile carrier network. """ NETWORK_CARRIER_MCC: Final = "network.carrier.mcc" """ The mobile carrier country code. """ NETWORK_CARRIER_MNC: Final = "network.carrier.mnc" """ The mobile carrier network code. """ NETWORK_CARRIER_NAME: Final = "network.carrier.name" """ The name of the mobile carrier. """ NETWORK_CONNECTION_STATE: Final = "network.connection.state" """ The state of the network connection. Note: Connection states are defined as part of the [rfc9293](https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2). """ NETWORK_CONNECTION_SUBTYPE: Final = "network.connection.subtype" """ This describes more details regarding the connection type. It may be the type of cell technology connection, but it could be used for describing details about a wifi connection. """ NETWORK_CONNECTION_TYPE: Final = "network.connection.type" """ The internet connection type. """ NETWORK_INTERFACE_NAME: Final = "network.interface.name" """ The network interface name. """ NETWORK_IO_DIRECTION: Final = "network.io.direction" """ The network IO operation direction. """ NETWORK_LOCAL_ADDRESS: Final = "network.local.address" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_LOCAL_ADDRESS`. """ NETWORK_LOCAL_PORT: Final = "network.local.port" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_LOCAL_PORT`. """ NETWORK_PEER_ADDRESS: Final = "network.peer.address" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_PEER_ADDRESS`. """ NETWORK_PEER_PORT: Final = "network.peer.port" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_PEER_PORT`. """ NETWORK_PROTOCOL_NAME: Final = "network.protocol.name" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_PROTOCOL_NAME`. """ NETWORK_PROTOCOL_VERSION: Final = "network.protocol.version" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_PROTOCOL_VERSION`. """ NETWORK_TRANSPORT: Final = "network.transport" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_TRANSPORT`. """ NETWORK_TYPE: Final = "network.type" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NETWORK_TYPE`. """ class NetworkConnectionStateValues(Enum): CLOSED = "closed" """closed.""" CLOSE_WAIT = "close_wait" """close_wait.""" CLOSING = "closing" """closing.""" ESTABLISHED = "established" """established.""" FIN_WAIT_1 = "fin_wait_1" """fin_wait_1.""" FIN_WAIT_2 = "fin_wait_2" """fin_wait_2.""" LAST_ACK = "last_ack" """last_ack.""" LISTEN = "listen" """listen.""" SYN_RECEIVED = "syn_received" """syn_received.""" SYN_SENT = "syn_sent" """syn_sent.""" TIME_WAIT = "time_wait" """time_wait.""" class NetworkConnectionSubtypeValues(Enum): GPRS = "gprs" """GPRS.""" EDGE = "edge" """EDGE.""" UMTS = "umts" """UMTS.""" CDMA = "cdma" """CDMA.""" EVDO_0 = "evdo_0" """EVDO Rel. 0.""" EVDO_A = "evdo_a" """EVDO Rev.
A.""" CDMA2000_1XRTT = "cdma2000_1xrtt" """CDMA2000 1XRTT.""" HSDPA = "hsdpa" """HSDPA.""" HSUPA = "hsupa" """HSUPA.""" HSPA = "hspa" """HSPA.""" IDEN = "iden" """IDEN.""" EVDO_B = "evdo_b" """EVDO Rev. B.""" LTE = "lte" """LTE.""" EHRPD = "ehrpd" """EHRPD.""" HSPAP = "hspap" """HSPAP.""" GSM = "gsm" """GSM.""" TD_SCDMA = "td_scdma" """TD-SCDMA.""" IWLAN = "iwlan" """IWLAN.""" NR = "nr" """5G NR (New Radio).""" NRNSA = "nrnsa" """5G NRNSA (New Radio Non-Standalone).""" LTE_CA = "lte_ca" """LTE CA.""" class NetworkConnectionTypeValues(Enum): WIFI = "wifi" """wifi.""" WIRED = "wired" """wired.""" CELL = "cell" """cell.""" UNAVAILABLE = "unavailable" """unavailable.""" UNKNOWN = "unknown" """unknown.""" class NetworkIoDirectionValues(Enum): TRANSMIT = "transmit" """transmit.""" RECEIVE = "receive" """receive.""" @deprecated( "Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues`." ) class NetworkTransportValues(Enum): TCP = "tcp" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues.TCP`.""" UDP = "udp" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues.UDP`.""" PIPE = "pipe" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues.PIPE`.""" UNIX = "unix" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues.UNIX`.""" QUIC = "quic" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTransportValues.QUIC`.""" @deprecated( "Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTypeValues`." ) class NetworkTypeValues(Enum): IPV4 = "ipv4" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTypeValues.IPV4`.""" IPV6 = "ipv6" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.network_attributes.NetworkTypeValues.IPV6`.""" nfs_attributes.py000066400000000000000000000015731511654350100422230ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final NFS_OPERATION_NAME: Final = "nfs.operation.name" """ NFSv4+ operation name. """ NFS_SERVER_REPCACHE_STATUS: Final = "nfs.server.repcache.status" """ Linux: one of "hit" (NFSD_STATS_RC_HITS), "miss" (NFSD_STATS_RC_MISSES), or "nocache" (NFSD_STATS_RC_NOCACHE -- uncacheable). 
""" oci_attributes.py000066400000000000000000000022271511654350100422040ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final OCI_MANIFEST_DIGEST: Final = "oci.manifest.digest" """ The digest of the OCI image manifest. For container images specifically is the digest by which the container image is known. Note: Follows [OCI Image Manifest Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), and specifically the [Digest property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). An example can be found in [Example Image Manifest](https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest). """ onc_rpc_attributes.py000066400000000000000000000017151511654350100430560ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final ONC_RPC_PROCEDURE_NAME: Final = "onc_rpc.procedure.name" """ ONC/Sun RPC procedure name. """ ONC_RPC_PROCEDURE_NUMBER: Final = "onc_rpc.procedure.number" """ ONC/Sun RPC procedure number. """ ONC_RPC_PROGRAM_NAME: Final = "onc_rpc.program.name" """ ONC/Sun RPC program name. """ ONC_RPC_VERSION: Final = "onc_rpc.version" """ ONC/Sun RPC program version. """ openai_attributes.py000066400000000000000000000024201511654350100427000ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final OPENAI_REQUEST_SERVICE_TIER: Final = "openai.request.service_tier" """ The service tier requested. May be a specific tier, default, or auto. 
""" OPENAI_RESPONSE_SERVICE_TIER: Final = "openai.response.service_tier" """ The service tier used for the response. """ OPENAI_RESPONSE_SYSTEM_FINGERPRINT: Final = ( "openai.response.system_fingerprint" ) """ A fingerprint to track any eventual change in the Generative AI environment. """ class OpenaiRequestServiceTierValues(Enum): AUTO = "auto" """The system will utilize scale tier credits until they are exhausted.""" DEFAULT = "default" """The system will utilize the default scale tier.""" openshift_attributes.py000066400000000000000000000014651511654350100434340ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final OPENSHIFT_CLUSTERQUOTA_NAME: Final = "openshift.clusterquota.name" """ The name of the cluster quota. """ OPENSHIFT_CLUSTERQUOTA_UID: Final = "openshift.clusterquota.uid" """ The UID of the cluster quota. """ opentracing_attributes.py000066400000000000000000000020301511654350100437330ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final OPENTRACING_REF_TYPE: Final = "opentracing.ref_type" """ Parent-child Reference type. Note: The causal relationship between a child Span and a parent Span. """ class OpentracingRefTypeValues(Enum): CHILD_OF = "child_of" """The parent Span depends on the child Span in some capacity.""" FOLLOWS_FROM = "follows_from" """The parent Span doesn't depend in any way on the result of the child Span.""" os_attributes.py000066400000000000000000000035021511654350100420500ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from enum import Enum from typing import Final OS_BUILD_ID: Final = "os.build_id" """ Unique identifier for a particular build or compilation of the operating system. """ OS_DESCRIPTION: Final = "os.description" """ Human readable (not intended to be parsed) OS version information, e.g. as reported by `ver` or `lsb_release -a` commands. """ OS_NAME: Final = "os.name" """ Human readable operating system name. """ OS_TYPE: Final = "os.type" """ The operating system type. """ OS_VERSION: Final = "os.version" """ The version string of the operating system as defined in [Version Attributes](/docs/resource/README.md#version-attributes). """ class OsTypeValues(Enum): WINDOWS = "windows" """Microsoft Windows.""" LINUX = "linux" """Linux.""" DARWIN = "darwin" """Apple Darwin.""" FREEBSD = "freebsd" """FreeBSD.""" NETBSD = "netbsd" """NetBSD.""" OPENBSD = "openbsd" """OpenBSD.""" DRAGONFLYBSD = "dragonflybsd" """DragonFly BSD.""" HPUX = "hpux" """HP-UX (Hewlett Packard Unix).""" AIX = "aix" """AIX (Advanced Interactive eXecutive).""" SOLARIS = "solaris" """SunOS, Oracle Solaris.""" Z_OS = "z_os" """Deprecated: Replaced by `zos`.""" ZOS = "zos" """IBM z/OS.""" otel_attributes.py000066400000000000000000000151351511654350100423770ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated OTEL_COMPONENT_NAME: Final = "otel.component.name" """ A name uniquely identifying the instance of the OpenTelemetry component within its containing SDK instance. Note: Implementations SHOULD ensure a low cardinality for this attribute, even across application or SDK restarts. E.g. implementations MUST NOT use UUIDs as values for this attribute. Implementations MAY achieve these goals by following a `<otel.component.type>/<instance-counter>` pattern, e.g. `batching_span_processor/0`. Hereby `otel.component.type` refers to the corresponding attribute value of the component. The value of `<instance-counter>` MAY be automatically assigned by the component and uniqueness within the enclosing SDK instance MUST be guaranteed. For example, `<instance-counter>` MAY be implemented by using a monotonically increasing counter (starting with `0`), which is incremented every time an instance of the given component type is started. With this implementation, for example the first Batching Span Processor would have `batching_span_processor/0` as `otel.component.name`, the second one `batching_span_processor/1` and so on. These values will therefore be reused in the case of an application restart. """ OTEL_COMPONENT_TYPE: Final = "otel.component.type" """ A name identifying the type of the OpenTelemetry component. Note: If none of the standardized values apply, implementations SHOULD use the language-defined name of the type. E.g. for Java the fully qualified classname SHOULD be used in this case.
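
A sketch of the `<otel.component.type>/<instance-counter>` naming scheme described under `otel.component.name` above (editorial illustration; the helper is hypothetical, not SDK API)::

    from collections import defaultdict
    from itertools import count

    # One monotonically increasing counter per component type.
    _instance_counters = defaultdict(count)

    def component_name(component_type: str) -> str:
        # First "batching_span_processor" -> "batching_span_processor/0",
        # the next one -> "batching_span_processor/1", and so on.
        return f"{component_type}/{next(_instance_counters[component_type])}"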
""" OTEL_LIBRARY_NAME: Final = "otel.library.name" """ Deprecated: Replaced by `otel.scope.name`. """ OTEL_LIBRARY_VERSION: Final = "otel.library.version" """ Deprecated: Replaced by `otel.scope.version`. """ OTEL_SCOPE_NAME: Final = "otel.scope.name" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OTEL_SCOPE_NAME`. """ OTEL_SCOPE_SCHEMA_URL: Final = "otel.scope.schema_url" """ The schema URL of the instrumentation scope. """ OTEL_SCOPE_VERSION: Final = "otel.scope.version" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OTEL_SCOPE_VERSION`. """ OTEL_SPAN_PARENT_ORIGIN: Final = "otel.span.parent.origin" """ Determines whether the span has a parent span, and if so, [whether it is a remote parent](https://opentelemetry.io/docs/specs/otel/trace/api/#isremote). """ OTEL_SPAN_SAMPLING_RESULT: Final = "otel.span.sampling_result" """ The result value of the sampler for this span. """ OTEL_STATUS_CODE: Final = "otel.status_code" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OTEL_STATUS_CODE`. """ OTEL_STATUS_DESCRIPTION: Final = "otel.status_description" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OTEL_STATUS_DESCRIPTION`. """ class OtelComponentTypeValues(Enum): BATCHING_SPAN_PROCESSOR = "batching_span_processor" """The builtin SDK batching span processor.""" SIMPLE_SPAN_PROCESSOR = "simple_span_processor" """The builtin SDK simple span processor.""" BATCHING_LOG_PROCESSOR = "batching_log_processor" """The builtin SDK batching log record processor.""" SIMPLE_LOG_PROCESSOR = "simple_log_processor" """The builtin SDK simple log record processor.""" OTLP_GRPC_SPAN_EXPORTER = "otlp_grpc_span_exporter" """OTLP span exporter over gRPC with protobuf serialization.""" OTLP_HTTP_SPAN_EXPORTER = "otlp_http_span_exporter" """OTLP span exporter over HTTP with protobuf serialization.""" OTLP_HTTP_JSON_SPAN_EXPORTER = "otlp_http_json_span_exporter" """OTLP span exporter over HTTP with JSON serialization.""" ZIPKIN_HTTP_SPAN_EXPORTER = "zipkin_http_span_exporter" """Zipkin span exporter over HTTP.""" OTLP_GRPC_LOG_EXPORTER = "otlp_grpc_log_exporter" """OTLP log record exporter over gRPC with protobuf serialization.""" OTLP_HTTP_LOG_EXPORTER = "otlp_http_log_exporter" """OTLP log record exporter over HTTP with protobuf serialization.""" OTLP_HTTP_JSON_LOG_EXPORTER = "otlp_http_json_log_exporter" """OTLP log record exporter over HTTP with JSON serialization.""" PERIODIC_METRIC_READER = "periodic_metric_reader" """The builtin SDK periodically exporting metric reader.""" OTLP_GRPC_METRIC_EXPORTER = "otlp_grpc_metric_exporter" """OTLP metric exporter over gRPC with protobuf serialization.""" OTLP_HTTP_METRIC_EXPORTER = "otlp_http_metric_exporter" """OTLP metric exporter over HTTP with protobuf serialization.""" OTLP_HTTP_JSON_METRIC_EXPORTER = "otlp_http_json_metric_exporter" """OTLP metric exporter over HTTP with JSON serialization.""" PROMETHEUS_HTTP_TEXT_METRIC_EXPORTER = ( "prometheus_http_text_metric_exporter" ) """Prometheus metric exporter over HTTP with the default text-based format.""" class OtelSpanParentOriginValues(Enum): NONE = "none" """The span does not have a parent, it is a root span.""" LOCAL = "local" """The span has a parent and the parent's span context [isRemote()](https://opentelemetry.io/docs/specs/otel/trace/api/#isremote) is false.""" REMOTE = "remote" """The span has a parent and the parent's span 
context [isRemote()](https://opentelemetry.io/docs/specs/otel/trace/api/#isremote) is true.""" class OtelSpanSamplingResultValues(Enum): DROP = "DROP" """The span is not sampled and not recording.""" RECORD_ONLY = "RECORD_ONLY" """The span is not sampled, but recording.""" RECORD_AND_SAMPLE = "RECORD_AND_SAMPLE" """The span is sampled and recording.""" @deprecated( "Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OtelStatusCodeValues`." ) class OtelStatusCodeValues(Enum): OK = "OK" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OtelStatusCodeValues.OK`.""" ERROR = "ERROR" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.otel_attributes.OtelStatusCodeValues.ERROR`.""" other_attributes.py000066400000000000000000000016711511654350100425550ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated STATE: Final = "state" """ Deprecated: Replaced by `db.client.connection.state`. """ @deprecated( "The attribute state is deprecated - Replaced by `db.client.connection.state`" ) class StateValues(Enum): IDLE = "idle" """idle.""" USED = "used" """used.""" peer_attributes.py000066400000000000000000000021511511654350100423610ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final PEER_SERVICE: Final = "peer.service" """ The [`service.name`](/docs/resource/README.md#service) of the remote service. SHOULD be equal to the actual `service.name` resource attribute of the remote service if any. Note: Examples of `peer.service` that users may specify: - A Redis cache of auth tokens as `peer.service="AuthTokenCache"`. - A gRPC service `rpc.service="io.opentelemetry.AuthService"` may be hosted in both a gateway, `peer.service="ExternalApiService"` and a backend, `peer.service="AuthService"`. 
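
A usage sketch (editorial illustration; the span name and peer service are taken from the examples above)::

    from opentelemetry import trace

    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span(
        "GET /token", kind=trace.SpanKind.CLIENT
    ) as span:
        span.set_attribute(PEER_SERVICE, "AuthTokenCache")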
""" pool_attributes.py000066400000000000000000000013041511654350100423760ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final POOL_NAME: Final = "pool.name" """ Deprecated: Replaced by `db.client.connection.pool.name`. """ pprof_attributes.py000066400000000000000000000033341511654350100425600ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final PPROF_LOCATION_IS_FOLDED: Final = "pprof.location.is_folded" """ Provides an indication that multiple symbols map to this location's address, for example due to identical code folding by the linker. In that case the line information represents one of the multiple symbols. This field must be recomputed when the symbolization state of the profile changes. """ PPROF_MAPPING_HAS_FILENAMES: Final = "pprof.mapping.has_filenames" """ Indicates that there are filenames related to this mapping. """ PPROF_MAPPING_HAS_FUNCTIONS: Final = "pprof.mapping.has_functions" """ Indicates that there are functions related to this mapping. """ PPROF_MAPPING_HAS_INLINE_FRAMES: Final = "pprof.mapping.has_inline_frames" """ Indicates that there are inline frames related to this mapping. """ PPROF_MAPPING_HAS_LINE_NUMBERS: Final = "pprof.mapping.has_line_numbers" """ Indicates that there are line numbers related to this mapping. """ PPROF_PROFILE_COMMENT: Final = "pprof.profile.comment" """ Free-form text associated with the profile. This field should not be used to store any machine-readable information, it is only for human-friendly content. """ process_attributes.py000066400000000000000000000204641511654350100431130ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated PROCESS_ARGS_COUNT: Final = "process.args_count" """ Length of the process.command_args array. Note: This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity. """ PROCESS_COMMAND: Final = "process.command" """ The command used to launch the process (i.e. the command name). On Linux based systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter extracted from `GetCommandLineW`. """ PROCESS_COMMAND_ARGS: Final = "process.command_args" """ All the command arguments (including the command/executable itself) as received by the process. On Linux-based systems (and some other Unixoid systems supporting procfs), can be set according to the list of null-delimited strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be the full argv vector passed to `main`. SHOULD NOT be collected by default unless there is sanitization that excludes sensitive data. """ PROCESS_COMMAND_LINE: Final = "process.command_line" """ The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead. SHOULD NOT be collected by default unless there is sanitization that excludes sensitive data. """ PROCESS_CONTEXT_SWITCH_TYPE: Final = "process.context_switch.type" """ Specifies whether the context switches for this data point were voluntary or involuntary. """ PROCESS_CPU_STATE: Final = "process.cpu.state" """ Deprecated: Replaced by `cpu.mode`. """ PROCESS_CREATION_TIME: Final = "process.creation.time" """ The date and time the process was created, in ISO 8601 format. """ PROCESS_ENVIRONMENT_VARIABLE_TEMPLATE: Final = "process.environment_variable" """ Process environment variables, `<key>` being the environment variable name, the value being the environment variable value. Note: Examples: - an environment variable `USER` with value `"ubuntu"` SHOULD be recorded as the `process.environment_variable.USER` attribute with value `"ubuntu"`. - an environment variable `PATH` with value `"/usr/local/bin:/usr/bin"` SHOULD be recorded as the `process.environment_variable.PATH` attribute with value `"/usr/local/bin:/usr/bin"`. """ PROCESS_EXECUTABLE_BUILD_ID_GNU: Final = "process.executable.build_id.gnu" """ The GNU build ID as found in the `.note.gnu.build-id` ELF section (hex string). """ PROCESS_EXECUTABLE_BUILD_ID_GO: Final = "process.executable.build_id.go" """ The Go build ID as retrieved by `go tool buildid <go executable>`. """ PROCESS_EXECUTABLE_BUILD_ID_HTLHASH: Final = ( "process.executable.build_id.htlhash" ) """ Profiling specific build ID for executables. See the OTel specification for Profiles for more information.
""" PROCESS_EXECUTABLE_BUILD_ID_PROFILING: Final = ( "process.executable.build_id.profiling" ) """ Deprecated: Replaced by `process.executable.build_id.htlhash`. """ PROCESS_EXECUTABLE_NAME: Final = "process.executable.name" """ The name of the process executable. On Linux based systems, this SHOULD be set to the base name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the base name of `GetProcessImageFileNameW`. """ PROCESS_EXECUTABLE_PATH: Final = "process.executable.path" """ The full path to the process executable. On Linux based systems, can be set to the target of `proc/[pid]/exe`. On Windows, can be set to the result of `GetProcessImageFileNameW`. """ PROCESS_EXIT_CODE: Final = "process.exit.code" """ The exit code of the process. """ PROCESS_EXIT_TIME: Final = "process.exit.time" """ The date and time the process exited, in ISO 8601 format. """ PROCESS_GROUP_LEADER_PID: Final = "process.group_leader.pid" """ The PID of the process's group leader. This is also the process group ID (PGID) of the process. """ PROCESS_INTERACTIVE: Final = "process.interactive" """ Whether the process is connected to an interactive shell. """ PROCESS_LINUX_CGROUP: Final = "process.linux.cgroup" """ The control group associated with the process. Note: Control groups (cgroups) are a kernel feature used to organize and manage process resources. This attribute provides the path(s) to the cgroup(s) associated with the process, which should match the contents of the [/proc/\\[PID\\]/cgroup](https://man7.org/linux/man-pages/man7/cgroups.7.html) file. """ PROCESS_OWNER: Final = "process.owner" """ The username of the user that owns the process. """ PROCESS_PAGING_FAULT_TYPE: Final = "process.paging.fault_type" """ Deprecated: Replaced by `system.paging.fault.type`. """ PROCESS_PARENT_PID: Final = "process.parent_pid" """ Parent Process identifier (PPID). """ PROCESS_PID: Final = "process.pid" """ Process identifier (PID). """ PROCESS_REAL_USER_ID: Final = "process.real_user.id" """ The real user ID (RUID) of the process. """ PROCESS_REAL_USER_NAME: Final = "process.real_user.name" """ The username of the real user of the process. """ PROCESS_RUNTIME_DESCRIPTION: Final = "process.runtime.description" """ An additional description about the runtime of the process, for example a specific vendor customization of the runtime environment. """ PROCESS_RUNTIME_NAME: Final = "process.runtime.name" """ The name of the runtime of this process. """ PROCESS_RUNTIME_VERSION: Final = "process.runtime.version" """ The version of the runtime of this process, as returned by the runtime without modification. """ PROCESS_SAVED_USER_ID: Final = "process.saved_user.id" """ The saved user ID (SUID) of the process. """ PROCESS_SAVED_USER_NAME: Final = "process.saved_user.name" """ The username of the saved user. """ PROCESS_SESSION_LEADER_PID: Final = "process.session_leader.pid" """ The PID of the process's session leader. This is also the session ID (SID) of the process. """ PROCESS_STATE: Final = "process.state" """ The process state, e.g., [Linux Process State Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES). """ PROCESS_TITLE: Final = "process.title" """ Process title (proctitle). Note: In many Unix-like systems, process title (proctitle), is the string that represents the name or command line of a running process, displayed by system monitoring tools like ps, top, and htop. """ PROCESS_USER_ID: Final = "process.user.id" """ The effective user ID (EUID) of the process. 
""" PROCESS_USER_NAME: Final = "process.user.name" """ The username of the effective user of the process. """ PROCESS_VPID: Final = "process.vpid" """ Virtual process identifier. Note: The process ID within a PID namespace. This is not necessarily unique across all processes on the host but it is unique within the process namespace that the process exists within. """ PROCESS_WORKING_DIRECTORY: Final = "process.working_directory" """ The working directory of the process. """ class ProcessContextSwitchTypeValues(Enum): VOLUNTARY = "voluntary" """voluntary.""" INVOLUNTARY = "involuntary" """involuntary.""" @deprecated( "The attribute process.cpu.state is deprecated - Replaced by `cpu.mode`" ) class ProcessCpuStateValues(Enum): SYSTEM = "system" """system.""" USER = "user" """user.""" WAIT = "wait" """wait.""" @deprecated( "The attribute process.paging.fault_type is deprecated - Replaced by `system.paging.fault.type`" ) class ProcessPagingFaultTypeValues(Enum): MAJOR = "major" """major.""" MINOR = "minor" """minor.""" class ProcessStateValues(Enum): RUNNING = "running" """running.""" SLEEPING = "sleeping" """sleeping.""" STOPPED = "stopped" """stopped.""" DEFUNCT = "defunct" """defunct.""" profile_attributes.py000066400000000000000000000040231511654350100430660ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final PROFILE_FRAME_TYPE: Final = "profile.frame.type" """ Describes the interpreter or compiler of a single frame. """ class ProfileFrameTypeValues(Enum): DOTNET = "dotnet" """[.NET](https://wikipedia.org/wiki/.NET).""" JVM = "jvm" """[JVM](https://wikipedia.org/wiki/Java_virtual_machine).""" KERNEL = "kernel" """[Kernel](https://wikipedia.org/wiki/Kernel_(operating_system)).""" NATIVE = "native" """Can be one of but not limited to [C](https://wikipedia.org/wiki/C_(programming_language)), [C++](https://wikipedia.org/wiki/C%2B%2B), [Go](https://wikipedia.org/wiki/Go_(programming_language)) or [Rust](https://wikipedia.org/wiki/Rust_(programming_language)). 
If possible, a more precise value MUST be used.""" PERL = "perl" """[Perl](https://wikipedia.org/wiki/Perl).""" PHP = "php" """[PHP](https://wikipedia.org/wiki/PHP).""" CPYTHON = "cpython" """[Python](https://wikipedia.org/wiki/Python_(programming_language)).""" RUBY = "ruby" """[Ruby](https://wikipedia.org/wiki/Ruby_(programming_language)).""" V8JS = "v8js" """[V8JS](https://wikipedia.org/wiki/V8_(JavaScript_engine)).""" BEAM = "beam" """[Erlang](https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine)).""" GO = "go" """[Go](https://wikipedia.org/wiki/Go_(programming_language)).""" RUST = "rust" """[Rust](https://wikipedia.org/wiki/Rust_(programming_language)).""" rpc_attributes.py000066400000000000000000000170351511654350100422150ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final RPC_CONNECT_RPC_ERROR_CODE: Final = "rpc.connect_rpc.error_code" """ The [error codes](https://connectrpc.com/docs/protocol/#error-codes) of the Connect request. Error codes are always string values. """ RPC_CONNECT_RPC_REQUEST_METADATA_TEMPLATE: Final = ( "rpc.connect_rpc.request.metadata" ) """ Connect request metadata, `<key>` being the normalized Connect Metadata key (lowercase), the value being the metadata values. Note: Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all request metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. For example, a property `my-custom-key` with value `["1.2.3.4", "1.2.3.5"]` SHOULD be recorded as the `rpc.connect_rpc.request.metadata.my-custom-key` attribute with value `["1.2.3.4", "1.2.3.5"]`. """ RPC_CONNECT_RPC_RESPONSE_METADATA_TEMPLATE: Final = ( "rpc.connect_rpc.response.metadata" ) """ Connect response metadata, `<key>` being the normalized Connect Metadata key (lowercase), the value being the metadata values. Note: Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all response metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. For example, a property `my-custom-key` with value `"attribute_value"` SHOULD be recorded as the `rpc.connect_rpc.response.metadata.my-custom-key` attribute with value `["attribute_value"]`. """ RPC_GRPC_REQUEST_METADATA_TEMPLATE: Final = "rpc.grpc.request.metadata" """ gRPC request metadata, `<key>` being the normalized gRPC Metadata key (lowercase), the value being the metadata values. Note: Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all request metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information.
For example, a property `my-custom-key` with value `["1.2.3.4", "1.2.3.5"]` SHOULD be recorded as `rpc.grpc.request.metadata.my-custom-key` attribute with value `["1.2.3.4", "1.2.3.5"]`. """ RPC_GRPC_RESPONSE_METADATA_TEMPLATE: Final = "rpc.grpc.response.metadata" """ gRPC response metadata, `` being the normalized gRPC Metadata key (lowercase), the value being the metadata values. Note: Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all response metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. For example, a property `my-custom-key` with value `["attribute_value"]` SHOULD be recorded as the `rpc.grpc.response.metadata.my-custom-key` attribute with value `["attribute_value"]`. """ RPC_GRPC_STATUS_CODE: Final = "rpc.grpc.status_code" """ The [numeric status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC request. """ RPC_JSONRPC_ERROR_CODE: Final = "rpc.jsonrpc.error_code" """ `error.code` property of response if it is an error response. """ RPC_JSONRPC_ERROR_MESSAGE: Final = "rpc.jsonrpc.error_message" """ `error.message` property of response if it is an error response. """ RPC_JSONRPC_REQUEST_ID: Final = "rpc.jsonrpc.request_id" """ `id` property of request or response. Since protocol allows id to be int, string, `null` or missing (for notifications), value is expected to be cast to string for simplicity. Use empty string in case of `null` value. Omit entirely if this is a notification. """ RPC_JSONRPC_VERSION: Final = "rpc.jsonrpc.version" """ Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't specify this, the value can be omitted. """ RPC_MESSAGE_COMPRESSED_SIZE: Final = "rpc.message.compressed_size" """ Compressed size of the message in bytes. """ RPC_MESSAGE_ID: Final = "rpc.message.id" """ MUST be calculated as two different counters starting from `1` one for sent messages and one for received message. Note: This way we guarantee that the values will be consistent between different implementations. """ RPC_MESSAGE_TYPE: Final = "rpc.message.type" """ Whether this is a received or sent message. """ RPC_MESSAGE_UNCOMPRESSED_SIZE: Final = "rpc.message.uncompressed_size" """ Uncompressed size of the message in bytes. """ RPC_METHOD: Final = "rpc.method" """ This is the logical name of the method from the RPC interface perspective. """ RPC_SERVICE: Final = "rpc.service" """ The full (logical) name of the service being called, including its package name, if applicable. """ RPC_SYSTEM: Final = "rpc.system" """ A string identifying the remoting system. See below for a list of well-known identifiers. 
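For example (an illustrative case, not normative), an instrumented gRPC client call to the standard health service would set `rpc.system` to `"grpc"`, `rpc.service` to `"grpc.health.v1.Health"` and `rpc.method` to `"Check"`.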
""" class RpcConnectRpcErrorCodeValues(Enum): CANCELLED = "cancelled" """cancelled.""" UNKNOWN = "unknown" """unknown.""" INVALID_ARGUMENT = "invalid_argument" """invalid_argument.""" DEADLINE_EXCEEDED = "deadline_exceeded" """deadline_exceeded.""" NOT_FOUND = "not_found" """not_found.""" ALREADY_EXISTS = "already_exists" """already_exists.""" PERMISSION_DENIED = "permission_denied" """permission_denied.""" RESOURCE_EXHAUSTED = "resource_exhausted" """resource_exhausted.""" FAILED_PRECONDITION = "failed_precondition" """failed_precondition.""" ABORTED = "aborted" """aborted.""" OUT_OF_RANGE = "out_of_range" """out_of_range.""" UNIMPLEMENTED = "unimplemented" """unimplemented.""" INTERNAL = "internal" """internal.""" UNAVAILABLE = "unavailable" """unavailable.""" DATA_LOSS = "data_loss" """data_loss.""" UNAUTHENTICATED = "unauthenticated" """unauthenticated.""" class RpcGrpcStatusCodeValues(Enum): OK = 0 """OK.""" CANCELLED = 1 """CANCELLED.""" UNKNOWN = 2 """UNKNOWN.""" INVALID_ARGUMENT = 3 """INVALID_ARGUMENT.""" DEADLINE_EXCEEDED = 4 """DEADLINE_EXCEEDED.""" NOT_FOUND = 5 """NOT_FOUND.""" ALREADY_EXISTS = 6 """ALREADY_EXISTS.""" PERMISSION_DENIED = 7 """PERMISSION_DENIED.""" RESOURCE_EXHAUSTED = 8 """RESOURCE_EXHAUSTED.""" FAILED_PRECONDITION = 9 """FAILED_PRECONDITION.""" ABORTED = 10 """ABORTED.""" OUT_OF_RANGE = 11 """OUT_OF_RANGE.""" UNIMPLEMENTED = 12 """UNIMPLEMENTED.""" INTERNAL = 13 """INTERNAL.""" UNAVAILABLE = 14 """UNAVAILABLE.""" DATA_LOSS = 15 """DATA_LOSS.""" UNAUTHENTICATED = 16 """UNAUTHENTICATED.""" class RpcMessageTypeValues(Enum): SENT = "SENT" """sent.""" RECEIVED = "RECEIVED" """received.""" class RpcSystemValues(Enum): GRPC = "grpc" """gRPC.""" JAVA_RMI = "java_rmi" """Java RMI.""" DOTNET_WCF = "dotnet_wcf" """.NET WCF.""" APACHE_DUBBO = "apache_dubbo" """Apache Dubbo.""" CONNECT_RPC = "connect_rpc" """Connect RPC.""" ONC_RPC = "onc_rpc" """[ONC RPC (Sun RPC)](https://datatracker.ietf.org/doc/html/rfc5531).""" JSONRPC = "jsonrpc" """JSON-RPC.""" security_rule_attributes.py000066400000000000000000000037171511654350100443350ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final SECURITY_RULE_CATEGORY: Final = "security_rule.category" """ A categorization value keyword used by the entity using the rule for detection of this event. """ SECURITY_RULE_DESCRIPTION: Final = "security_rule.description" """ The description of the rule generating the event. """ SECURITY_RULE_LICENSE: Final = "security_rule.license" """ Name of the license under which the rule used to generate this event is made available. """ SECURITY_RULE_NAME: Final = "security_rule.name" """ The name of the rule or signature generating the event. """ SECURITY_RULE_REFERENCE: Final = "security_rule.reference" """ Reference URL to additional information about the rule used to generate this event. 
Note: The URL can point to the vendor’s documentation about the rule. If that’s not available, it can also be a link to a more general page describing this type of alert. """ SECURITY_RULE_RULESET_NAME: Final = "security_rule.ruleset.name" """ Name of the ruleset, policy, group, or parent category in which the rule used to generate this event is a member. """ SECURITY_RULE_UUID: Final = "security_rule.uuid" """ A rule ID that is unique within the scope of a set or group of agents, observers, or other entities using the rule for detection of this event. """ SECURITY_RULE_VERSION: Final = "security_rule.version" """ The version / revision of the rule being used for analysis. """ server_attributes.py000066400000000000000000000016271511654350100427430ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final SERVER_ADDRESS: Final = "server.address" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.server_attributes.SERVER_ADDRESS`. """ SERVER_PORT: Final = "server.port" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.server_attributes.SERVER_PORT`. """ service_attributes.py000066400000000000000000000070311511654350100430700ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final SERVICE_INSTANCE_ID: Final = "service.instance.id" """ The string ID of the service instance. Note: MUST be unique for each instance of the same `service.namespace,service.name` pair (in other words `service.namespace,service.name,service.instance.id` triplet MUST be globally unique). The ID helps to distinguish instances of the same service that exist at the same time (e.g. instances of a horizontally scaled service). Implementations, such as SDKs, are recommended to generate a random Version 1 or Version 4 [RFC 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an inherent unique ID as the source of this value if stability is desirable. In that case, the ID SHOULD be used as source of a UUID Version 5 and SHOULD use the following UUID as the namespace: `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. 
UUIDs are typically recommended, as only an opaque value for the purposes of identifying a service instance is needed. Similar to what can be seen in the man page for the [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/latest/machine-id.html) file, the underlying data, such as pod name and namespace should be treated as confidential, being the user's choice to expose it or not via another resource attribute. For applications running behind an application server (like unicorn), we do not recommend using one identifier for all processes participating in the application. Instead, it's recommended each division (e.g. a worker thread in unicorn) to have its own instance.id. It's not recommended for a Collector to set `service.instance.id` if it can't unambiguously determine the service instance that is generating that telemetry. For instance, creating an UUID based on `pod.name` will likely be wrong, as the Collector might not know from which container within that pod the telemetry originated. However, Collectors can set the `service.instance.id` if they can unambiguously determine the service instance for that telemetry. This is typically the case for scraping receivers, as they know the target address and port. """ SERVICE_NAME: Final = "service.name" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.service_attributes.SERVICE_NAME`. """ SERVICE_NAMESPACE: Final = "service.namespace" """ A namespace for `service.name`. Note: A string value having a meaning that helps to distinguish a group of services, for example the team name that owns a group of services. `service.name` is expected to be unique within the same namespace. If `service.namespace` is not specified in the Resource then `service.name` is expected to be unique for all services that have no explicit namespace defined (so the empty/unspecified namespace is simply one more valid namespace). Zero-length namespace string is assumed equal to unspecified namespace. """ SERVICE_VERSION: Final = "service.version" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.service_attributes.SERVICE_VERSION`. """ session_attributes.py000066400000000000000000000014401511654350100431110ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final SESSION_ID: Final = "session.id" """ A unique id to identify a session. """ SESSION_PREVIOUS_ID: Final = "session.previous_id" """ The previous `session.id` for this user, when known. 
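For example (illustrative values), if a user's session `"5e2c"` expires and a new session `"9f1d"` begins, telemetry for the new session would carry `session.id` = `"9f1d"` and `session.previous_id` = `"5e2c"`, allowing the two sessions to be correlated.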
""" source_attributes.py000066400000000000000000000020431511654350100427260ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final SOURCE_ADDRESS: Final = "source.address" """ Source address - domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. Note: When observed from the destination side, and when communicating through an intermediary, `source.address` SHOULD represent the source address behind any intermediaries, for example proxies, if it's available. """ SOURCE_PORT: Final = "source.port" """ Source port number. """ system_attributes.py000066400000000000000000000120541511654350100427550ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated SYSTEM_CPU_LOGICAL_NUMBER: Final = "system.cpu.logical_number" """ Deprecated: Replaced by `cpu.logical_number`. """ SYSTEM_CPU_STATE: Final = "system.cpu.state" """ Deprecated: Replaced by `cpu.mode`. """ SYSTEM_DEVICE: Final = "system.device" """ The device identifier. """ SYSTEM_FILESYSTEM_MODE: Final = "system.filesystem.mode" """ The filesystem mode. """ SYSTEM_FILESYSTEM_MOUNTPOINT: Final = "system.filesystem.mountpoint" """ The filesystem mount path. """ SYSTEM_FILESYSTEM_STATE: Final = "system.filesystem.state" """ The filesystem state. """ SYSTEM_FILESYSTEM_TYPE: Final = "system.filesystem.type" """ The filesystem type. """ SYSTEM_MEMORY_STATE: Final = "system.memory.state" """ The memory state. """ SYSTEM_NETWORK_STATE: Final = "system.network.state" """ Deprecated: Replaced by `network.connection.state`. """ SYSTEM_PAGING_DIRECTION: Final = "system.paging.direction" """ The paging access direction. """ SYSTEM_PAGING_FAULT_TYPE: Final = "system.paging.fault.type" """ The paging fault type. """ SYSTEM_PAGING_STATE: Final = "system.paging.state" """ The memory paging state. """ SYSTEM_PAGING_TYPE: Final = "system.paging.type" """ Deprecated: Replaced by `system.paging.fault.type`. """ SYSTEM_PROCESS_STATUS: Final = "system.process.status" """ Deprecated: Replaced by `process.state`. 
""" SYSTEM_PROCESSES_STATUS: Final = "system.processes.status" """ Deprecated: Replaced by `process.state`. """ @deprecated( "The attribute system.cpu.state is deprecated - Replaced by `cpu.mode`" ) class SystemCpuStateValues(Enum): USER = "user" """user.""" SYSTEM = "system" """system.""" NICE = "nice" """nice.""" IDLE = "idle" """idle.""" IOWAIT = "iowait" """iowait.""" INTERRUPT = "interrupt" """interrupt.""" STEAL = "steal" """steal.""" class SystemFilesystemStateValues(Enum): USED = "used" """used.""" FREE = "free" """free.""" RESERVED = "reserved" """reserved.""" class SystemFilesystemTypeValues(Enum): FAT32 = "fat32" """fat32.""" EXFAT = "exfat" """exfat.""" NTFS = "ntfs" """ntfs.""" REFS = "refs" """refs.""" HFSPLUS = "hfsplus" """hfsplus.""" EXT4 = "ext4" """ext4.""" class SystemMemoryStateValues(Enum): USED = "used" """Actual used virtual memory in bytes.""" FREE = "free" """free.""" SHARED = "shared" """Deprecated: Removed, report shared memory usage with `metric.system.memory.shared` metric.""" BUFFERS = "buffers" """buffers.""" CACHED = "cached" """cached.""" @deprecated( "The attribute system.network.state is deprecated - Replaced by `network.connection.state`" ) class SystemNetworkStateValues(Enum): CLOSE = "close" """close.""" CLOSE_WAIT = "close_wait" """close_wait.""" CLOSING = "closing" """closing.""" DELETE = "delete" """delete.""" ESTABLISHED = "established" """established.""" FIN_WAIT_1 = "fin_wait_1" """fin_wait_1.""" FIN_WAIT_2 = "fin_wait_2" """fin_wait_2.""" LAST_ACK = "last_ack" """last_ack.""" LISTEN = "listen" """listen.""" SYN_RECV = "syn_recv" """syn_recv.""" SYN_SENT = "syn_sent" """syn_sent.""" TIME_WAIT = "time_wait" """time_wait.""" class SystemPagingDirectionValues(Enum): IN = "in" """in.""" OUT = "out" """out.""" class SystemPagingFaultTypeValues(Enum): MAJOR = "major" """major.""" MINOR = "minor" """minor.""" class SystemPagingStateValues(Enum): USED = "used" """used.""" FREE = "free" """free.""" @deprecated( "The attribute system.paging.type is deprecated - Replaced by `system.paging.fault.type`" ) class SystemPagingTypeValues(Enum): MAJOR = "major" """major.""" MINOR = "minor" """minor.""" @deprecated( "The attribute system.process.status is deprecated - Replaced by `process.state`" ) class SystemProcessStatusValues(Enum): RUNNING = "running" """running.""" SLEEPING = "sleeping" """sleeping.""" STOPPED = "stopped" """stopped.""" DEFUNCT = "defunct" """defunct.""" @deprecated( "The attribute system.processes.status is deprecated - Replaced by `process.state`" ) class SystemProcessesStatusValues(Enum): RUNNING = "running" """running.""" SLEEPING = "sleeping" """sleeping.""" STOPPED = "stopped" """stopped.""" DEFUNCT = "defunct" """defunct.""" telemetry_attributes.py000066400000000000000000000073111511654350100434430ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from enum import Enum from typing import Final from typing_extensions import deprecated TELEMETRY_DISTRO_NAME: Final = "telemetry.distro.name" """ The name of the auto instrumentation agent or distribution, if used. Note: Official auto instrumentation agents and distributions SHOULD set the `telemetry.distro.name` attribute to a string starting with `opentelemetry-`, e.g. `opentelemetry-java-instrumentation`. """ TELEMETRY_DISTRO_VERSION: Final = "telemetry.distro.version" """ The version string of the auto instrumentation agent or distribution, if used. """ TELEMETRY_SDK_LANGUAGE: Final = "telemetry.sdk.language" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TELEMETRY_SDK_LANGUAGE`. """ TELEMETRY_SDK_NAME: Final = "telemetry.sdk.name" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TELEMETRY_SDK_NAME`. """ TELEMETRY_SDK_VERSION: Final = "telemetry.sdk.version" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TELEMETRY_SDK_VERSION`. """ @deprecated( "Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues`." ) class TelemetrySdkLanguageValues(Enum): CPP = "cpp" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.CPP`.""" DOTNET = "dotnet" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.DOTNET`.""" ERLANG = "erlang" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.ERLANG`.""" GO = "go" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.GO`.""" JAVA = "java" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.JAVA`.""" NODEJS = "nodejs" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.NODEJS`.""" PHP = "php" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.PHP`.""" PYTHON = "python" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.PYTHON`.""" RUBY = "ruby" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.RUBY`.""" RUST = "rust" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.RUST`.""" SWIFT = "swift" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.SWIFT`.""" WEBJS = "webjs" """Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.telemetry_attributes.TelemetrySdkLanguageValues.WEBJS`.""" test_attributes.py000066400000000000000000000030411511654350100424040ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final TEST_CASE_NAME: Final = "test.case.name" """ The fully qualified human readable name of the [test case](https://wikipedia.org/wiki/Test_case). """ TEST_CASE_RESULT_STATUS: Final = "test.case.result.status" """ The status of the actual test case result from test execution. """ TEST_SUITE_NAME: Final = "test.suite.name" """ The human readable name of a [test suite](https://wikipedia.org/wiki/Test_suite). """ TEST_SUITE_RUN_STATUS: Final = "test.suite.run.status" """ The status of the test suite run. """ class TestCaseResultStatusValues(Enum): PASS = "pass" """pass.""" FAIL = "fail" """fail.""" class TestSuiteRunStatusValues(Enum): SUCCESS = "success" """success.""" FAILURE = "failure" """failure.""" SKIPPED = "skipped" """skipped.""" ABORTED = "aborted" """aborted.""" TIMED_OUT = "timed_out" """timed_out.""" IN_PROGRESS = "in_progress" """in_progress.""" thread_attributes.py000066400000000000000000000026671511654350100427110ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final THREAD_ID: Final = "thread.id" """ Current "managed" thread ID (as opposed to OS thread ID). Note: Examples of where the value can be extracted from: | Language or platform | Source | | --- | --- | | JVM | `Thread.currentThread().threadId()` | | .NET | `Thread.CurrentThread.ManagedThreadId` | | Python | `threading.current_thread().ident` | | Ruby | `Thread.current.object_id` | | C++ | `std::this_thread::get_id()` | | Erlang | `erlang:self()` |. """ THREAD_NAME: Final = "thread.name" """ Current thread name. Note: Examples of where the value can be extracted from: | Language or platform | Source | | --- | --- | | JVM | `Thread.currentThread().getName()` | | .NET | `Thread.CurrentThread.Name` | | Python | `threading.current_thread().name` | | Ruby | `Thread.current.name` | | Erlang | `erlang:process_info(self(), registered_name)` |. """ tls_attributes.py000066400000000000000000000150001511654350100422250ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final TLS_CIPHER: Final = "tls.cipher" """ String indicating the [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used during the current connection. Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` of the [registered TLS Cipher Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). """ TLS_CLIENT_CERTIFICATE: Final = "tls.client.certificate" """ PEM-encoded stand-alone certificate offered by the client. This is usually mutually-exclusive of `client.certificate_chain` since this value also exists in that list. """ TLS_CLIENT_CERTIFICATE_CHAIN: Final = "tls.client.certificate_chain" """ Array of PEM-encoded certificates that make up the certificate chain offered by the client. This is usually mutually-exclusive of `client.certificate` since that value should be the first certificate in the chain. """ TLS_CLIENT_HASH_MD5: Final = "tls.client.hash.md5" """ Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. """ TLS_CLIENT_HASH_SHA1: Final = "tls.client.hash.sha1" """ Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. """ TLS_CLIENT_HASH_SHA256: Final = "tls.client.hash.sha256" """ Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. """ TLS_CLIENT_ISSUER: Final = "tls.client.issuer" """ Distinguished name of [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of the issuer of the x.509 certificate presented by the client. """ TLS_CLIENT_JA3: Final = "tls.client.ja3" """ A hash that identifies clients based on how they perform an SSL/TLS handshake. """ TLS_CLIENT_NOT_AFTER: Final = "tls.client.not_after" """ Date/Time indicating when client certificate is no longer considered valid. """ TLS_CLIENT_NOT_BEFORE: Final = "tls.client.not_before" """ Date/Time indicating when client certificate is first considered valid. """ TLS_CLIENT_SERVER_NAME: Final = "tls.client.server_name" """ Deprecated: Replaced by `server.address`. """ TLS_CLIENT_SUBJECT: Final = "tls.client.subject" """ Distinguished name of subject of the x.509 certificate presented by the client. """ TLS_CLIENT_SUPPORTED_CIPHERS: Final = "tls.client.supported_ciphers" """ Array of ciphers offered by the client during the client hello. """ TLS_CURVE: Final = "tls.curve" """ String indicating the curve used for the given cipher, when applicable. """ TLS_ESTABLISHED: Final = "tls.established" """ Boolean flag indicating if the TLS negotiation was successful and transitioned to an encrypted tunnel. """ TLS_NEXT_PROTOCOL: Final = "tls.next_protocol" """ String indicating the protocol being tunneled. 
Per the values in the [IANA registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), this string should be lower case. """ TLS_PROTOCOL_NAME: Final = "tls.protocol.name" """ Normalized lowercase protocol name parsed from original string of the negotiated [SSL/TLS protocol version](https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values). """ TLS_PROTOCOL_VERSION: Final = "tls.protocol.version" """ Numeric part of the version parsed from the original string of the negotiated [SSL/TLS protocol version](https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values). """ TLS_RESUMED: Final = "tls.resumed" """ Boolean flag indicating if this TLS connection was resumed from an existing TLS negotiation. """ TLS_SERVER_CERTIFICATE: Final = "tls.server.certificate" """ PEM-encoded stand-alone certificate offered by the server. This is usually mutually-exclusive of `server.certificate_chain` since this value also exists in that list. """ TLS_SERVER_CERTIFICATE_CHAIN: Final = "tls.server.certificate_chain" """ Array of PEM-encoded certificates that make up the certificate chain offered by the server. This is usually mutually-exclusive of `server.certificate` since that value should be the first certificate in the chain. """ TLS_SERVER_HASH_MD5: Final = "tls.server.hash.md5" """ Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. """ TLS_SERVER_HASH_SHA1: Final = "tls.server.hash.sha1" """ Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. """ TLS_SERVER_HASH_SHA256: Final = "tls.server.hash.sha256" """ Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. """ TLS_SERVER_ISSUER: Final = "tls.server.issuer" """ Distinguished name of [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of the issuer of the x.509 certificate presented by the client. """ TLS_SERVER_JA3S: Final = "tls.server.ja3s" """ A hash that identifies servers based on how they perform an SSL/TLS handshake. """ TLS_SERVER_NOT_AFTER: Final = "tls.server.not_after" """ Date/Time indicating when server certificate is no longer considered valid. """ TLS_SERVER_NOT_BEFORE: Final = "tls.server.not_before" """ Date/Time indicating when server certificate is first considered valid. """ TLS_SERVER_SUBJECT: Final = "tls.server.subject" """ Distinguished name of subject of the x.509 certificate presented by the server. """ class TlsProtocolNameValues(Enum): SSL = "ssl" """ssl.""" TLS = "tls" """tls.""" url_attributes.py000066400000000000000000000101241511654350100422270ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final URL_DOMAIN: Final = "url.domain" """ Domain extracted from the `url.full`, such as "opentelemetry.io". Note: In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the domain field. If the URL contains a [literal IPv6 address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by `[` and `]`, the `[` and `]` characters should also be captured in the domain field. """ URL_EXTENSION: Final = "url.extension" """ The file extension extracted from the `url.full`, excluding the leading dot. Note: The file extension is only set if it exists, as not every url has a file extension. When the file name has multiple extensions `example.tar.gz`, only the last one should be captured `gz`, not `tar.gz`. """ URL_FRAGMENT: Final = "url.fragment" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.url_attributes.URL_FRAGMENT`. """ URL_FULL: Final = "url.full" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.url_attributes.URL_FULL`. """ URL_ORIGINAL: Final = "url.original" """ Unmodified original URL as seen in the event source. Note: In network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. This field is meant to represent the URL as it was observed, complete or not. `url.original` might contain credentials passed via URL in form of `https://username:password@www.example.com/`. In such case password and username SHOULD NOT be redacted and attribute's value SHOULD remain the same. """ URL_PATH: Final = "url.path" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.url_attributes.URL_PATH`. """ URL_PORT: Final = "url.port" """ Port extracted from the `url.full`. """ URL_QUERY: Final = "url.query" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.url_attributes.URL_QUERY`. """ URL_REGISTERED_DOMAIN: Final = "url.registered_domain" """ The highest registered url domain, stripped of the subdomain. Note: This value can be determined precisely with the [public suffix list](https://publicsuffix.org/). For example, the registered domain for `foo.example.com` is `example.com`. Trying to approximate this by simply taking the last two labels will not work well for TLDs such as `co.uk`. """ URL_SCHEME: Final = "url.scheme" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.url_attributes.URL_SCHEME`. """ URL_SUBDOMAIN: Final = "url.subdomain" """ The subdomain portion of a fully qualified domain name includes all of the names except the host name under the registered_domain. In a partially qualified domain, or if the qualification level of the full name cannot be determined, subdomain contains all of the names below the registered domain. Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, with no trailing period. 
""" URL_TEMPLATE: Final = "url.template" """ The low-cardinality template of an [absolute path reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). """ URL_TOP_LEVEL_DOMAIN: Final = "url.top_level_domain" """ The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for example.com is `com`. Note: This value can be determined precisely with the [public suffix list](https://publicsuffix.org/). """ user_agent_attributes.py000066400000000000000000000060351511654350100435670ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final USER_AGENT_NAME: Final = "user_agent.name" """ Name of the user-agent extracted from original. Usually refers to the browser's name. Note: [Example](https://www.whatsmyua.info) of extracting browser's name from original string. In the case of using a user-agent for non-browser products, such as microservices with multiple names/versions inside the `user_agent.original`, the most significant name SHOULD be selected. In such a scenario it should align with `user_agent.version`. """ USER_AGENT_ORIGINAL: Final = "user_agent.original" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.attributes.user_agent_attributes.USER_AGENT_ORIGINAL`. """ USER_AGENT_OS_NAME: Final = "user_agent.os.name" """ Human readable operating system name. Note: For mapping user agent strings to OS names, libraries such as [ua-parser](https://github.com/ua-parser) can be utilized. """ USER_AGENT_OS_VERSION: Final = "user_agent.os.version" """ The version string of the operating system as defined in [Version Attributes](/docs/resource/README.md#version-attributes). Note: For mapping user agent strings to OS versions, libraries such as [ua-parser](https://github.com/ua-parser) can be utilized. """ USER_AGENT_SYNTHETIC_TYPE: Final = "user_agent.synthetic.type" """ Specifies the category of synthetic traffic, such as tests or bots. Note: This attribute MAY be derived from the contents of the `user_agent.original` attribute. Components that populate the attribute are responsible for determining what they consider to be synthetic bot or test traffic. This attribute can either be set for self-identification purposes, or on telemetry detected to be generated as a result of a synthetic request. This attribute is useful for distinguishing between genuine client traffic and synthetic traffic generated by bots or tests. """ USER_AGENT_VERSION: Final = "user_agent.version" """ Version of the user-agent extracted from original. Usually refers to the browser's version. Note: [Example](https://www.whatsmyua.info) of extracting browser's version from original string. 
In the case of using a user-agent for non-browser products, such as microservices with multiple names/versions inside the `user_agent.original`, the most significant version SHOULD be selected. In such a scenario it should align with `user_agent.name`. """ class UserAgentSyntheticTypeValues(Enum): BOT = "bot" """Bot source.""" TEST = "test" """Synthetic test source.""" user_attributes.py000066400000000000000000000022401511654350100424030ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final USER_EMAIL: Final = "user.email" """ User email address. """ USER_FULL_NAME: Final = "user.full_name" """ User's full name. """ USER_HASH: Final = "user.hash" """ Unique user hash to correlate information for a user in anonymized form. Note: Useful if `user.id` or `user.name` contain confidential information and cannot be used. """ USER_ID: Final = "user.id" """ Unique identifier of the user. """ USER_NAME: Final = "user.name" """ Short name or login/username of the user. """ USER_ROLES: Final = "user.roles" """ Array of user roles at the time of the event. """ vcs_attributes.py000066400000000000000000000220061511654350100422220ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final from typing_extensions import deprecated VCS_CHANGE_ID: Final = "vcs.change.id" """ The ID of the change (pull request/merge request/changelist) if applicable. This is usually a unique (within repository) identifier generated by the VCS system. """ VCS_CHANGE_STATE: Final = "vcs.change.state" """ The state of the change (pull request/merge request/changelist). """ VCS_CHANGE_TITLE: Final = "vcs.change.title" """ The human readable title of the change (pull request/merge request/changelist). This title is often a brief summary of the change and may get merged in to a ref as the commit summary. """ VCS_LINE_CHANGE_TYPE: Final = "vcs.line_change.type" """ The type of line change being measured on a branch or change. """ VCS_OWNER_NAME: Final = "vcs.owner.name" """ The group owner within the version control system. """ VCS_PROVIDER_NAME: Final = "vcs.provider.name" """ The name of the version control system provider. 
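For example (illustrative), telemetry describing a merge request hosted on GitLab would set `vcs.provider.name` to `"gitlab"`; see `VcsProviderNameValues` below for the well-known values.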
""" VCS_REF_BASE_NAME: Final = "vcs.ref.base.name" """ The name of the [reference](https://git-scm.com/docs/gitglossary#def_ref) such as **branch** or **tag** in the repository. Note: `base` refers to the starting point of a change. For example, `main` would be the base reference of type branch if you've created a new reference of type branch from it and created new commits. """ VCS_REF_BASE_REVISION: Final = "vcs.ref.base.revision" """ The revision, literally [revised version](https://www.merriam-webster.com/dictionary/revision), The revision most often refers to a commit object in Git, or a revision number in SVN. Note: `base` refers to the starting point of a change. For example, `main` would be the base reference of type branch if you've created a new reference of type branch from it and created new commits. The revision can be a full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), of the recorded change to a ref within a repository pointing to a commit [commit](https://git-scm.com/docs/git-commit) object. It does not necessarily have to be a hash; it can simply define a [revision number](https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html) which is an integer that is monotonically increasing. In cases where it is identical to the `ref.base.name`, it SHOULD still be included. It is up to the implementer to decide which value to set as the revision based on the VCS system and situational context. """ VCS_REF_BASE_TYPE: Final = "vcs.ref.base.type" """ The type of the [reference](https://git-scm.com/docs/gitglossary#def_ref) in the repository. Note: `base` refers to the starting point of a change. For example, `main` would be the base reference of type branch if you've created a new reference of type branch from it and created new commits. """ VCS_REF_HEAD_NAME: Final = "vcs.ref.head.name" """ The name of the [reference](https://git-scm.com/docs/gitglossary#def_ref) such as **branch** or **tag** in the repository. Note: `head` refers to where you are right now; the current reference at a given time. """ VCS_REF_HEAD_REVISION: Final = "vcs.ref.head.revision" """ The revision, literally [revised version](https://www.merriam-webster.com/dictionary/revision), The revision most often refers to a commit object in Git, or a revision number in SVN. Note: `head` refers to where you are right now; the current reference at a given time.The revision can be a full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), of the recorded change to a ref within a repository pointing to a commit [commit](https://git-scm.com/docs/git-commit) object. It does not necessarily have to be a hash; it can simply define a [revision number](https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html) which is an integer that is monotonically increasing. In cases where it is identical to the `ref.head.name`, it SHOULD still be included. It is up to the implementer to decide which value to set as the revision based on the VCS system and situational context. """ VCS_REF_HEAD_TYPE: Final = "vcs.ref.head.type" """ The type of the [reference](https://git-scm.com/docs/gitglossary#def_ref) in the repository. Note: `head` refers to where you are right now; the current reference at a given time. """ VCS_REF_TYPE: Final = "vcs.ref.type" """ The type of the [reference](https://git-scm.com/docs/gitglossary#def_ref) in the repository. 
""" VCS_REPOSITORY_CHANGE_ID: Final = "vcs.repository.change.id" """ Deprecated: Replaced by `vcs.change.id`. """ VCS_REPOSITORY_CHANGE_TITLE: Final = "vcs.repository.change.title" """ Deprecated: Replaced by `vcs.change.title`. """ VCS_REPOSITORY_NAME: Final = "vcs.repository.name" """ The human readable name of the repository. It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab or organization in GitHub. Note: Due to it only being the name, it can clash with forks of the same repository if collecting telemetry across multiple orgs or groups in the same backends. """ VCS_REPOSITORY_REF_NAME: Final = "vcs.repository.ref.name" """ Deprecated: Replaced by `vcs.ref.head.name`. """ VCS_REPOSITORY_REF_REVISION: Final = "vcs.repository.ref.revision" """ Deprecated: Replaced by `vcs.ref.head.revision`. """ VCS_REPOSITORY_REF_TYPE: Final = "vcs.repository.ref.type" """ Deprecated: Replaced by `vcs.ref.head.type`. """ VCS_REPOSITORY_URL_FULL: Final = "vcs.repository.url.full" """ The [canonical URL](https://support.google.com/webmasters/answer/10347851) of the repository providing the complete HTTP(S) address in order to locate and identify the repository through a browser. Note: In Git Version Control Systems, the canonical URL SHOULD NOT include the `.git` extension. """ VCS_REVISION_DELTA_DIRECTION: Final = "vcs.revision_delta.direction" """ The type of revision comparison. """ class VcsChangeStateValues(Enum): OPEN = "open" """Open means the change is currently active and under review. It hasn't been merged into the target branch yet, and it's still possible to make changes or add comments.""" WIP = "wip" """WIP (work-in-progress, draft) means the change is still in progress and not yet ready for a full review. It might still undergo significant changes.""" CLOSED = "closed" """Closed means the merge request has been closed without merging. 
This can happen for various reasons, such as the changes being deemed unnecessary, the issue being resolved in another way, or the author deciding to withdraw the request.""" MERGED = "merged" """Merged indicates that the change has been successfully integrated into the target codebase.""" class VcsLineChangeTypeValues(Enum): ADDED = "added" """How many lines were added.""" REMOVED = "removed" """How many lines were removed.""" class VcsProviderNameValues(Enum): GITHUB = "github" """[GitHub](https://github.com).""" GITLAB = "gitlab" """[GitLab](https://gitlab.com).""" GITTEA = "gittea" """Deprecated: Replaced by `gitea`.""" GITEA = "gitea" """[Gitea](https://gitea.io).""" BITBUCKET = "bitbucket" """[Bitbucket](https://bitbucket.org).""" class VcsRefBaseTypeValues(Enum): BRANCH = "branch" """[branch](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch).""" TAG = "tag" """[tag](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag).""" class VcsRefHeadTypeValues(Enum): BRANCH = "branch" """[branch](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch).""" TAG = "tag" """[tag](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag).""" class VcsRefTypeValues(Enum): BRANCH = "branch" """[branch](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch).""" TAG = "tag" """[tag](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag).""" @deprecated( "The attribute vcs.repository.ref.type is deprecated - Replaced by `vcs.ref.head.type`" ) class VcsRepositoryRefTypeValues(Enum): BRANCH = "branch" """[branch](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch).""" TAG = "tag" """[tag](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag).""" class VcsRevisionDeltaDirectionValues(Enum): BEHIND = "behind" """How many revisions the change is behind the target ref.""" AHEAD = "ahead" """How many revisions the change is ahead of the target ref.""" webengine_attributes.py000066400000000000000000000016411511654350100433740ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final WEBENGINE_DESCRIPTION: Final = "webengine.description" """ Additional description of the web engine (e.g. detailed version and edition information). """ WEBENGINE_NAME: Final = "webengine.name" """ The name of the web engine. """ WEBENGINE_VERSION: Final = "webengine.version" """ The version of the web engine. 
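For example (illustrative), a WildFly application server might report `webengine.name` = `"WildFly"` and `webengine.version` = `"21.0.0"`.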
""" zos_attributes.py000066400000000000000000000016551511654350100422510ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final ZOS_SMF_ID: Final = "zos.smf.id" """ The System Management Facility (SMF) Identifier uniquely identified a z/OS system within a SYSPLEX or mainframe environment and is used for system and performance analysis. """ ZOS_SYSPLEX_NAME: Final = "zos.sysplex.name" """ The name of the SYSPLEX to which the z/OS system belongs too. """ metrics/000077500000000000000000000000001511654350100360675ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubatingazure_metrics.py000066400000000000000000000036141511654350100413210ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Final from opentelemetry.metrics import Histogram, Meter, UpDownCounter AZURE_COSMOSDB_CLIENT_ACTIVE_INSTANCE_COUNT: Final = ( "azure.cosmosdb.client.active_instance.count" ) """ Number of active client instances Instrument: updowncounter Unit: {instance} """ def create_azure_cosmosdb_client_active_instance_count( meter: Meter, ) -> UpDownCounter: """Number of active client instances""" return meter.create_up_down_counter( name=AZURE_COSMOSDB_CLIENT_ACTIVE_INSTANCE_COUNT, description="Number of active client instances.", unit="{instance}", ) AZURE_COSMOSDB_CLIENT_OPERATION_REQUEST_CHARGE: Final = ( "azure.cosmosdb.client.operation.request_charge" ) """ [Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation Instrument: histogram Unit: {request_unit} """ def create_azure_cosmosdb_client_operation_request_charge( meter: Meter, ) -> Histogram: """[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation""" return meter.create_histogram( name=AZURE_COSMOSDB_CLIENT_OPERATION_REQUEST_CHARGE, description="[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation.", unit="{request_unit}", ) cicd_metrics.py000066400000000000000000000071101511654350100410700ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final from opentelemetry.metrics import Counter, Histogram, Meter, UpDownCounter CICD_PIPELINE_RUN_ACTIVE: Final = "cicd.pipeline.run.active" """ The number of pipeline runs currently active in the system by state Instrument: updowncounter Unit: {run} """ def create_cicd_pipeline_run_active(meter: Meter) -> UpDownCounter: """The number of pipeline runs currently active in the system by state""" return meter.create_up_down_counter( name=CICD_PIPELINE_RUN_ACTIVE, description="The number of pipeline runs currently active in the system by state.", unit="{run}", ) CICD_PIPELINE_RUN_DURATION: Final = "cicd.pipeline.run.duration" """ Duration of a pipeline run grouped by pipeline, state and result Instrument: histogram Unit: s """ def create_cicd_pipeline_run_duration(meter: Meter) -> Histogram: """Duration of a pipeline run grouped by pipeline, state and result""" return meter.create_histogram( name=CICD_PIPELINE_RUN_DURATION, description="Duration of a pipeline run grouped by pipeline, state and result.", unit="s", ) CICD_PIPELINE_RUN_ERRORS: Final = "cicd.pipeline.run.errors" """ The number of errors encountered in pipeline runs (eg. compile, test failures) Instrument: counter Unit: {error} Note: There might be errors in a pipeline run that are non fatal (eg. they are suppressed) or in a parallel stage multiple stages could have a fatal error. This means that this error count might not be the same as the count of metric `cicd.pipeline.run.duration` with run result `failure`. 
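For example (illustrative), a single run whose two parallel stages each hit a fatal compile error would add `2` to this counter, while `cicd.pipeline.run.duration` would record only one run with result `failure`.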
""" def create_cicd_pipeline_run_errors(meter: Meter) -> Counter: """The number of errors encountered in pipeline runs (eg. compile, test failures)""" return meter.create_counter( name=CICD_PIPELINE_RUN_ERRORS, description="The number of errors encountered in pipeline runs (eg. compile, test failures).", unit="{error}", ) CICD_SYSTEM_ERRORS: Final = "cicd.system.errors" """ The number of errors in a component of the CICD system (eg. controller, scheduler, agent) Instrument: counter Unit: {error} Note: Errors in pipeline run execution are explicitly excluded. Ie a test failure is not counted in this metric. """ def create_cicd_system_errors(meter: Meter) -> Counter: """The number of errors in a component of the CICD system (eg. controller, scheduler, agent)""" return meter.create_counter( name=CICD_SYSTEM_ERRORS, description="The number of errors in a component of the CICD system (eg. controller, scheduler, agent).", unit="{error}", ) CICD_WORKER_COUNT: Final = "cicd.worker.count" """ The number of workers on the CICD system by state Instrument: updowncounter Unit: {count} """ def create_cicd_worker_count(meter: Meter) -> UpDownCounter: """The number of workers on the CICD system by state""" return meter.create_up_down_counter( name=CICD_WORKER_COUNT, description="The number of workers on the CICD system by state.", unit="{count}", ) container_metrics.py000066400000000000000000000243351511654350100421600ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import ( Callable, Final, Generator, Iterable, Optional, Sequence, Union, ) from opentelemetry.metrics import ( CallbackOptions, Counter, Meter, ObservableGauge, Observation, UpDownCounter, ) # pylint: disable=invalid-name CallbackT = Union[ Callable[[CallbackOptions], Iterable[Observation]], Generator[Iterable[Observation], CallbackOptions, None], ] CONTAINER_CPU_TIME: Final = "container.cpu.time" """ Total CPU time consumed Instrument: counter Unit: s Note: Total CPU time consumed by the specific container on all available CPU cores. """ def create_container_cpu_time(meter: Meter) -> Counter: """Total CPU time consumed""" return meter.create_counter( name=CONTAINER_CPU_TIME, description="Total CPU time consumed.", unit="s", ) CONTAINER_CPU_USAGE: Final = "container.cpu.usage" """ Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs Instrument: gauge Unit: {cpu} Note: CPU usage of the specific container on all available CPU cores, averaged over the sample window. """ def create_container_cpu_usage( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs""" return meter.create_observable_gauge( name=CONTAINER_CPU_USAGE, callbacks=callbacks, description="Container's CPU usage, measured in cpus. 
Range from 0 to the number of allocatable CPUs.", unit="{cpu}", ) CONTAINER_DISK_IO: Final = "container.disk.io" """ Disk bytes for the container Instrument: counter Unit: By Note: The total number of bytes read/written successfully (aggregated from all disks). """ def create_container_disk_io(meter: Meter) -> Counter: """Disk bytes for the container""" return meter.create_counter( name=CONTAINER_DISK_IO, description="Disk bytes for the container.", unit="By", ) CONTAINER_FILESYSTEM_AVAILABLE: Final = "container.filesystem.available" """ Container filesystem available bytes Instrument: updowncounter Unit: By Note: In K8s, this metric is derived from the [FsStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field of the [ContainerStats.Rootfs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats) of the Kubelet's stats API. """ def create_container_filesystem_available(meter: Meter) -> UpDownCounter: """Container filesystem available bytes""" return meter.create_up_down_counter( name=CONTAINER_FILESYSTEM_AVAILABLE, description="Container filesystem available bytes.", unit="By", ) CONTAINER_FILESYSTEM_CAPACITY: Final = "container.filesystem.capacity" """ Container filesystem capacity Instrument: updowncounter Unit: By Note: In K8s, this metric is derived from the [FsStats.CapacityBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field of the [ContainerStats.Rootfs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats) of the Kubelet's stats API. """ def create_container_filesystem_capacity(meter: Meter) -> UpDownCounter: """Container filesystem capacity""" return meter.create_up_down_counter( name=CONTAINER_FILESYSTEM_CAPACITY, description="Container filesystem capacity.", unit="By", ) CONTAINER_FILESYSTEM_USAGE: Final = "container.filesystem.usage" """ Container filesystem usage Instrument: updowncounter Unit: By Note: This may not equal capacity - available. In K8s, this metric is derived from the [FsStats.UsedBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field of the [ContainerStats.Rootfs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats) of the Kubelet's stats API. """ def create_container_filesystem_usage(meter: Meter) -> UpDownCounter: """Container filesystem usage""" return meter.create_up_down_counter( name=CONTAINER_FILESYSTEM_USAGE, description="Container filesystem usage.", unit="By", ) CONTAINER_MEMORY_AVAILABLE: Final = "container.memory.available" """ Container memory available Instrument: updowncounter Unit: By Note: Available memory for use. This is defined as the memory limit - workingSetBytes. If memory limit is undefined, the available bytes are omitted. In general, this metric can be derived from [cadvisor](https://github.com/google/cadvisor/blob/v0.53.0/docs/storage/prometheus.md#prometheus-container-metrics) and by subtracting the `container_memory_working_set_bytes` metric from the `container_spec_memory_limit_bytes` metric. In K8s, this metric is derived from the [MemoryStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API.
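Illustration (hedged, not normative; the variable names here are editorial): a collector that already holds `limit` and `working_set` in bytes could report `limit - working_set` through the `UpDownCounter` returned by `create_container_memory_available` below, adding the signed change since its last report.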
""" def create_container_memory_available(meter: Meter) -> UpDownCounter: """Container memory available""" return meter.create_up_down_counter( name=CONTAINER_MEMORY_AVAILABLE, description="Container memory available.", unit="By", ) CONTAINER_MEMORY_PAGING_FAULTS: Final = "container.memory.paging.faults" """ Container memory paging faults Instrument: counter Unit: {fault} Note: In general, this metric can be derived from [cadvisor](https://github.com/google/cadvisor/blob/v0.53.0/docs/storage/prometheus.md#prometheus-container-metrics) and specifically the `container_memory_failures_total{failure_type=pgfault, scope=container}` and `container_memory_failures_total{failure_type=pgmajfault, scope=container}`metric. In K8s, this metric is derived from the [MemoryStats.PageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) and [MemoryStats.MajorPageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_container_memory_paging_faults(meter: Meter) -> Counter: """Container memory paging faults""" return meter.create_counter( name=CONTAINER_MEMORY_PAGING_FAULTS, description="Container memory paging faults.", unit="{fault}", ) CONTAINER_MEMORY_RSS: Final = "container.memory.rss" """ Container memory RSS Instrument: updowncounter Unit: By Note: In general, this metric can be derived from [cadvisor](https://github.com/google/cadvisor/blob/v0.53.0/docs/storage/prometheus.md#prometheus-container-metrics) and specifically the `container_memory_rss` metric. In K8s, this metric is derived from the [MemoryStats.RSSBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_container_memory_rss(meter: Meter) -> UpDownCounter: """Container memory RSS""" return meter.create_up_down_counter( name=CONTAINER_MEMORY_RSS, description="Container memory RSS.", unit="By", ) CONTAINER_MEMORY_USAGE: Final = "container.memory.usage" """ Memory usage of the container Instrument: counter Unit: By Note: Memory usage of the container. """ def create_container_memory_usage(meter: Meter) -> Counter: """Memory usage of the container""" return meter.create_counter( name=CONTAINER_MEMORY_USAGE, description="Memory usage of the container.", unit="By", ) CONTAINER_MEMORY_WORKING_SET: Final = "container.memory.working_set" """ Container memory working set Instrument: updowncounter Unit: By Note: In general, this metric can be derived from [cadvisor](https://github.com/google/cadvisor/blob/v0.53.0/docs/storage/prometheus.md#prometheus-container-metrics) and specifically the `container_memory_working_set_bytes` metric. In K8s, this metric is derived from the [MemoryStats.WorkingSetBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. 
""" def create_container_memory_working_set(meter: Meter) -> UpDownCounter: """Container memory working set""" return meter.create_up_down_counter( name=CONTAINER_MEMORY_WORKING_SET, description="Container memory working set.", unit="By", ) CONTAINER_NETWORK_IO: Final = "container.network.io" """ Network bytes for the container Instrument: counter Unit: By Note: The number of bytes sent/received on all network interfaces by the container. """ def create_container_network_io(meter: Meter) -> Counter: """Network bytes for the container""" return meter.create_counter( name=CONTAINER_NETWORK_IO, description="Network bytes for the container.", unit="By", ) CONTAINER_UPTIME: Final = "container.uptime" """ The time the container has been running Instrument: gauge Unit: s Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. The actual accuracy would depend on the instrumentation and operating system. """ def create_container_uptime( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The time the container has been running""" return meter.create_observable_gauge( name=CONTAINER_UPTIME, callbacks=callbacks, description="The time the container has been running.", unit="s", ) cpu_metrics.py000066400000000000000000000043141511654350100407600ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import ( Callable, Final, Generator, Iterable, Optional, Sequence, Union, ) from opentelemetry.metrics import ( CallbackOptions, Counter, Meter, ObservableGauge, Observation, ) # pylint: disable=invalid-name CallbackT = Union[ Callable[[CallbackOptions], Iterable[Observation]], Generator[Iterable[Observation], CallbackOptions, None], ] CPU_FREQUENCY: Final = "cpu.frequency" """ Deprecated: Replaced by `system.cpu.frequency`. """ def create_cpu_frequency( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Deprecated. Use `system.cpu.frequency` instead""" return meter.create_observable_gauge( name=CPU_FREQUENCY, callbacks=callbacks, description="Deprecated. Use `system.cpu.frequency` instead.", unit="{Hz}", ) CPU_TIME: Final = "cpu.time" """ Deprecated: Replaced by `system.cpu.time`. """ def create_cpu_time(meter: Meter) -> Counter: """Deprecated. Use `system.cpu.time` instead""" return meter.create_counter( name=CPU_TIME, description="Deprecated. Use `system.cpu.time` instead.", unit="s", ) CPU_UTILIZATION: Final = "cpu.utilization" """ Deprecated: Replaced by `system.cpu.utilization`. """ def create_cpu_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Deprecated. Use `system.cpu.utilization` instead""" return meter.create_observable_gauge( name=CPU_UTILIZATION, callbacks=callbacks, description="Deprecated. 
Use `system.cpu.utilization` instead.", unit="1", ) cpython_metrics.py000066400000000000000000000052631511654350100416610ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final from opentelemetry.metrics import Counter, Meter CPYTHON_GC_COLLECTED_OBJECTS: Final = "cpython.gc.collected_objects" """ The total number of objects collected inside a generation since interpreter start Instrument: counter Unit: {object} Note: This metric reports data from [`gc.get_stats()`](https://docs.python.org/3/library/gc.html#gc.get_stats). """ def create_cpython_gc_collected_objects(meter: Meter) -> Counter: """The total number of objects collected inside a generation since interpreter start""" return meter.create_counter( name=CPYTHON_GC_COLLECTED_OBJECTS, description="The total number of objects collected inside a generation since interpreter start.", unit="{object}", ) CPYTHON_GC_COLLECTIONS: Final = "cpython.gc.collections" """ The number of times a generation was collected since interpreter start Instrument: counter Unit: {collection} Note: This metric reports data from [`gc.get_stats()`](https://docs.python.org/3/library/gc.html#gc.get_stats). """ def create_cpython_gc_collections(meter: Meter) -> Counter: """The number of times a generation was collected since interpreter start""" return meter.create_counter( name=CPYTHON_GC_COLLECTIONS, description="The number of times a generation was collected since interpreter start.", unit="{collection}", ) CPYTHON_GC_UNCOLLECTABLE_OBJECTS: Final = "cpython.gc.uncollectable_objects" """ The total number of objects which were found to be uncollectable inside a generation since interpreter start Instrument: counter Unit: {object} Note: This metric reports data from [`gc.get_stats()`](https://docs.python.org/3/library/gc.html#gc.get_stats). """ def create_cpython_gc_uncollectable_objects(meter: Meter) -> Counter: """The total number of objects which were found to be uncollectable inside a generation since interpreter start""" return meter.create_counter( name=CPYTHON_GC_UNCOLLECTABLE_OBJECTS, description="The total number of objects which were found to be uncollectable inside a generation since interpreter start.", unit="{object}", ) db_metrics.py000066400000000000000000000300061511654350100405530ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. from typing import Final from opentelemetry.metrics import Counter, Histogram, Meter, UpDownCounter DB_CLIENT_CONNECTION_COUNT: Final = "db.client.connection.count" """ The number of connections that are currently in the state described by the `state` attribute Instrument: updowncounter Unit: {connection} """ def create_db_client_connection_count(meter: Meter) -> UpDownCounter: """The number of connections that are currently in the state described by the `state` attribute""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTION_COUNT, description="The number of connections that are currently in the state described by the `state` attribute.", unit="{connection}", ) DB_CLIENT_CONNECTION_CREATE_TIME: Final = "db.client.connection.create_time" """ The time it took to create a new connection Instrument: histogram Unit: s """ def create_db_client_connection_create_time(meter: Meter) -> Histogram: """The time it took to create a new connection""" return meter.create_histogram( name=DB_CLIENT_CONNECTION_CREATE_TIME, description="The time it took to create a new connection.", unit="s", ) DB_CLIENT_CONNECTION_IDLE_MAX: Final = "db.client.connection.idle.max" """ The maximum number of idle open connections allowed Instrument: updowncounter Unit: {connection} """ def create_db_client_connection_idle_max(meter: Meter) -> UpDownCounter: """The maximum number of idle open connections allowed""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTION_IDLE_MAX, description="The maximum number of idle open connections allowed.", unit="{connection}", ) DB_CLIENT_CONNECTION_IDLE_MIN: Final = "db.client.connection.idle.min" """ The minimum number of idle open connections allowed Instrument: updowncounter Unit: {connection} """ def create_db_client_connection_idle_min(meter: Meter) -> UpDownCounter: """The minimum number of idle open connections allowed""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTION_IDLE_MIN, description="The minimum number of idle open connections allowed.", unit="{connection}", ) DB_CLIENT_CONNECTION_MAX: Final = "db.client.connection.max" """ The maximum number of open connections allowed Instrument: updowncounter Unit: {connection} """ def create_db_client_connection_max(meter: Meter) -> UpDownCounter: """The maximum number of open connections allowed""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTION_MAX, description="The maximum number of open connections allowed.", unit="{connection}", ) DB_CLIENT_CONNECTION_PENDING_REQUESTS: Final = ( "db.client.connection.pending_requests" ) """ The number of current pending requests for an open connection Instrument: updowncounter Unit: {request} """ def create_db_client_connection_pending_requests( meter: Meter, ) -> UpDownCounter: """The number of current pending requests for an open connection""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTION_PENDING_REQUESTS, description="The number of current pending requests for an open connection.", unit="{request}", ) DB_CLIENT_CONNECTION_TIMEOUTS: Final = "db.client.connection.timeouts" """ The number of connection timeouts that have occurred trying to obtain a connection from the pool Instrument: counter Unit: {timeout} """ def create_db_client_connection_timeouts(meter: Meter) -> Counter: """The number of connection timeouts that have occurred trying to obtain a connection from the pool""" return meter.create_counter( name=DB_CLIENT_CONNECTION_TIMEOUTS,
description="The number of connection timeouts that have occurred trying to obtain a connection from the pool.", unit="{timeout}", ) DB_CLIENT_CONNECTION_USE_TIME: Final = "db.client.connection.use_time" """ The time between borrowing a connection and returning it to the pool Instrument: histogram Unit: s """ def create_db_client_connection_use_time(meter: Meter) -> Histogram: """The time between borrowing a connection and returning it to the pool""" return meter.create_histogram( name=DB_CLIENT_CONNECTION_USE_TIME, description="The time between borrowing a connection and returning it to the pool.", unit="s", ) DB_CLIENT_CONNECTION_WAIT_TIME: Final = "db.client.connection.wait_time" """ The time it took to obtain an open connection from the pool Instrument: histogram Unit: s """ def create_db_client_connection_wait_time(meter: Meter) -> Histogram: """The time it took to obtain an open connection from the pool""" return meter.create_histogram( name=DB_CLIENT_CONNECTION_WAIT_TIME, description="The time it took to obtain an open connection from the pool.", unit="s", ) DB_CLIENT_CONNECTIONS_CREATE_TIME: Final = "db.client.connections.create_time" """ Deprecated: Replaced by `db.client.connection.create_time` with unit `s`. """ def create_db_client_connections_create_time(meter: Meter) -> Histogram: """Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`""" return meter.create_histogram( name=DB_CLIENT_CONNECTIONS_CREATE_TIME, description="Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`.", unit="ms", ) DB_CLIENT_CONNECTIONS_IDLE_MAX: Final = "db.client.connections.idle.max" """ Deprecated: Replaced by `db.client.connection.idle.max`. """ def create_db_client_connections_idle_max(meter: Meter) -> UpDownCounter: """Deprecated, use `db.client.connection.idle.max` instead""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTIONS_IDLE_MAX, description="Deprecated, use `db.client.connection.idle.max` instead.", unit="{connection}", ) DB_CLIENT_CONNECTIONS_IDLE_MIN: Final = "db.client.connections.idle.min" """ Deprecated: Replaced by `db.client.connection.idle.min`. """ def create_db_client_connections_idle_min(meter: Meter) -> UpDownCounter: """Deprecated, use `db.client.connection.idle.min` instead""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTIONS_IDLE_MIN, description="Deprecated, use `db.client.connection.idle.min` instead.", unit="{connection}", ) DB_CLIENT_CONNECTIONS_MAX: Final = "db.client.connections.max" """ Deprecated: Replaced by `db.client.connection.max`. """ def create_db_client_connections_max(meter: Meter) -> UpDownCounter: """Deprecated, use `db.client.connection.max` instead""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTIONS_MAX, description="Deprecated, use `db.client.connection.max` instead.", unit="{connection}", ) DB_CLIENT_CONNECTIONS_PENDING_REQUESTS: Final = ( "db.client.connections.pending_requests" ) """ Deprecated: Replaced by `db.client.connection.pending_requests`. 
""" def create_db_client_connections_pending_requests( meter: Meter, ) -> UpDownCounter: """Deprecated, use `db.client.connection.pending_requests` instead""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTIONS_PENDING_REQUESTS, description="Deprecated, use `db.client.connection.pending_requests` instead.", unit="{request}", ) DB_CLIENT_CONNECTIONS_TIMEOUTS: Final = "db.client.connections.timeouts" """ Deprecated: Replaced by `db.client.connection.timeouts`. """ def create_db_client_connections_timeouts(meter: Meter) -> Counter: """Deprecated, use `db.client.connection.timeouts` instead""" return meter.create_counter( name=DB_CLIENT_CONNECTIONS_TIMEOUTS, description="Deprecated, use `db.client.connection.timeouts` instead.", unit="{timeout}", ) DB_CLIENT_CONNECTIONS_USAGE: Final = "db.client.connections.usage" """ Deprecated: Replaced by `db.client.connection.count`. """ def create_db_client_connections_usage(meter: Meter) -> UpDownCounter: """Deprecated, use `db.client.connection.count` instead""" return meter.create_up_down_counter( name=DB_CLIENT_CONNECTIONS_USAGE, description="Deprecated, use `db.client.connection.count` instead.", unit="{connection}", ) DB_CLIENT_CONNECTIONS_USE_TIME: Final = "db.client.connections.use_time" """ Deprecated: Replaced by `db.client.connection.use_time` with unit `s`. """ def create_db_client_connections_use_time(meter: Meter) -> Histogram: """Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`""" return meter.create_histogram( name=DB_CLIENT_CONNECTIONS_USE_TIME, description="Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`.", unit="ms", ) DB_CLIENT_CONNECTIONS_WAIT_TIME: Final = "db.client.connections.wait_time" """ Deprecated: Replaced by `db.client.connection.wait_time` with unit `s`. """ def create_db_client_connections_wait_time(meter: Meter) -> Histogram: """Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`""" return meter.create_histogram( name=DB_CLIENT_CONNECTIONS_WAIT_TIME, description="Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`.", unit="ms", ) DB_CLIENT_COSMOSDB_ACTIVE_INSTANCE_COUNT: Final = ( "db.client.cosmosdb.active_instance.count" ) """ Deprecated: Replaced by `azure.cosmosdb.client.active_instance.count`. """ def create_db_client_cosmosdb_active_instance_count( meter: Meter, ) -> UpDownCounter: """Deprecated, use `azure.cosmosdb.client.active_instance.count` instead""" return meter.create_up_down_counter( name=DB_CLIENT_COSMOSDB_ACTIVE_INSTANCE_COUNT, description="Deprecated, use `azure.cosmosdb.client.active_instance.count` instead.", unit="{instance}", ) DB_CLIENT_COSMOSDB_OPERATION_REQUEST_CHARGE: Final = ( "db.client.cosmosdb.operation.request_charge" ) """ Deprecated: Replaced by `azure.cosmosdb.client.operation.request_charge`. """ def create_db_client_cosmosdb_operation_request_charge( meter: Meter, ) -> Histogram: """Deprecated, use `azure.cosmosdb.client.operation.request_charge` instead""" return meter.create_histogram( name=DB_CLIENT_COSMOSDB_OPERATION_REQUEST_CHARGE, description="Deprecated, use `azure.cosmosdb.client.operation.request_charge` instead.", unit="{request_unit}", ) DB_CLIENT_OPERATION_DURATION: Final = "db.client.operation.duration" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.metrics.db_metrics.DB_CLIENT_OPERATION_DURATION`. 
""" def create_db_client_operation_duration(meter: Meter) -> Histogram: """Duration of database client operations""" return meter.create_histogram( name=DB_CLIENT_OPERATION_DURATION, description="Duration of database client operations.", unit="s", ) DB_CLIENT_RESPONSE_RETURNED_ROWS: Final = "db.client.response.returned_rows" """ The actual number of records returned by the database operation Instrument: histogram Unit: {row} """ def create_db_client_response_returned_rows(meter: Meter) -> Histogram: """The actual number of records returned by the database operation""" return meter.create_histogram( name=DB_CLIENT_RESPONSE_RETURNED_ROWS, description="The actual number of records returned by the database operation.", unit="{row}", ) dns_metrics.py000066400000000000000000000020751511654350100407570ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final from opentelemetry.metrics import Histogram, Meter DNS_LOOKUP_DURATION: Final = "dns.lookup.duration" """ Measures the time taken to perform a DNS lookup Instrument: histogram Unit: s """ def create_dns_lookup_duration(meter: Meter) -> Histogram: """Measures the time taken to perform a DNS lookup""" return meter.create_histogram( name=DNS_LOOKUP_DURATION, description="Measures the time taken to perform a DNS lookup.", unit="s", ) faas_metrics.py000066400000000000000000000102311511654350100410760ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Final from opentelemetry.metrics import Counter, Histogram, Meter FAAS_COLDSTARTS: Final = "faas.coldstarts" """ Number of invocation cold starts Instrument: counter Unit: {coldstart} """ def create_faas_coldstarts(meter: Meter) -> Counter: """Number of invocation cold starts""" return meter.create_counter( name=FAAS_COLDSTARTS, description="Number of invocation cold starts.", unit="{coldstart}", ) FAAS_CPU_USAGE: Final = "faas.cpu_usage" """ Distribution of CPU usage per invocation Instrument: histogram Unit: s """ def create_faas_cpu_usage(meter: Meter) -> Histogram: """Distribution of CPU usage per invocation""" return meter.create_histogram( name=FAAS_CPU_USAGE, description="Distribution of CPU usage per invocation.", unit="s", ) FAAS_ERRORS: Final = "faas.errors" """ Number of invocation errors Instrument: counter Unit: {error} """ def create_faas_errors(meter: Meter) -> Counter: """Number of invocation errors""" return meter.create_counter( name=FAAS_ERRORS, description="Number of invocation errors.", unit="{error}", ) FAAS_INIT_DURATION: Final = "faas.init_duration" """ Measures the duration of the function's initialization, such as a cold start Instrument: histogram Unit: s """ def create_faas_init_duration(meter: Meter) -> Histogram: """Measures the duration of the function's initialization, such as a cold start""" return meter.create_histogram( name=FAAS_INIT_DURATION, description="Measures the duration of the function's initialization, such as a cold start.", unit="s", ) FAAS_INVOCATIONS: Final = "faas.invocations" """ Number of successful invocations Instrument: counter Unit: {invocation} """ def create_faas_invocations(meter: Meter) -> Counter: """Number of successful invocations""" return meter.create_counter( name=FAAS_INVOCATIONS, description="Number of successful invocations.", unit="{invocation}", ) FAAS_INVOKE_DURATION: Final = "faas.invoke_duration" """ Measures the duration of the function's logic execution Instrument: histogram Unit: s """ def create_faas_invoke_duration(meter: Meter) -> Histogram: """Measures the duration of the function's logic execution""" return meter.create_histogram( name=FAAS_INVOKE_DURATION, description="Measures the duration of the function's logic execution.", unit="s", ) FAAS_MEM_USAGE: Final = "faas.mem_usage" """ Distribution of max memory usage per invocation Instrument: histogram Unit: By """ def create_faas_mem_usage(meter: Meter) -> Histogram: """Distribution of max memory usage per invocation""" return meter.create_histogram( name=FAAS_MEM_USAGE, description="Distribution of max memory usage per invocation.", unit="By", ) FAAS_NET_IO: Final = "faas.net_io" """ Distribution of net I/O usage per invocation Instrument: histogram Unit: By """ def create_faas_net_io(meter: Meter) -> Histogram: """Distribution of net I/O usage per invocation""" return meter.create_histogram( name=FAAS_NET_IO, description="Distribution of net I/O usage per invocation.", unit="By", ) FAAS_TIMEOUTS: Final = "faas.timeouts" """ Number of invocation timeouts Instrument: counter Unit: {timeout} """ def create_faas_timeouts(meter: Meter) -> Counter: """Number of invocation timeouts""" return meter.create_counter( name=FAAS_TIMEOUTS, description="Number of invocation timeouts.", unit="{timeout}", ) gen_ai_metrics.py000066400000000000000000000061331511654350100414140ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # 
Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final from opentelemetry.metrics import Histogram, Meter GEN_AI_CLIENT_OPERATION_DURATION: Final = "gen_ai.client.operation.duration" """ GenAI operation duration Instrument: histogram Unit: s """ def create_gen_ai_client_operation_duration(meter: Meter) -> Histogram: """GenAI operation duration""" return meter.create_histogram( name=GEN_AI_CLIENT_OPERATION_DURATION, description="GenAI operation duration.", unit="s", ) GEN_AI_CLIENT_TOKEN_USAGE: Final = "gen_ai.client.token.usage" """ Number of input and output tokens used Instrument: histogram Unit: {token} """ def create_gen_ai_client_token_usage(meter: Meter) -> Histogram: """Number of input and output tokens used""" return meter.create_histogram( name=GEN_AI_CLIENT_TOKEN_USAGE, description="Number of input and output tokens used.", unit="{token}", ) GEN_AI_SERVER_REQUEST_DURATION: Final = "gen_ai.server.request.duration" """ Generative AI server request duration such as time-to-last byte or last output token Instrument: histogram Unit: s """ def create_gen_ai_server_request_duration(meter: Meter) -> Histogram: """Generative AI server request duration such as time-to-last byte or last output token""" return meter.create_histogram( name=GEN_AI_SERVER_REQUEST_DURATION, description="Generative AI server request duration such as time-to-last byte or last output token.", unit="s", ) GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN: Final = ( "gen_ai.server.time_per_output_token" ) """ Time per output token generated after the first token for successful responses Instrument: histogram Unit: s """ def create_gen_ai_server_time_per_output_token(meter: Meter) -> Histogram: """Time per output token generated after the first token for successful responses""" return meter.create_histogram( name=GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN, description="Time per output token generated after the first token for successful responses.", unit="s", ) GEN_AI_SERVER_TIME_TO_FIRST_TOKEN: Final = "gen_ai.server.time_to_first_token" """ Time to generate first token for successful responses Instrument: histogram Unit: s """ def create_gen_ai_server_time_to_first_token(meter: Meter) -> Histogram: """Time to generate first token for successful responses""" return meter.create_histogram( name=GEN_AI_SERVER_TIME_TO_FIRST_TOKEN, description="Time to generate first token for successful responses.", unit="s", ) http_metrics.py000066400000000000000000000147711511654350100411600ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final from opentelemetry.metrics import Histogram, Meter, UpDownCounter HTTP_CLIENT_ACTIVE_REQUESTS: Final = "http.client.active_requests" """ Number of active HTTP requests Instrument: updowncounter Unit: {request} """ def create_http_client_active_requests(meter: Meter) -> UpDownCounter: """Number of active HTTP requests""" return meter.create_up_down_counter( name=HTTP_CLIENT_ACTIVE_REQUESTS, description="Number of active HTTP requests.", unit="{request}", ) HTTP_CLIENT_CONNECTION_DURATION: Final = "http.client.connection.duration" """ The duration of the successfully established outbound HTTP connections Instrument: histogram Unit: s """ def create_http_client_connection_duration(meter: Meter) -> Histogram: """The duration of the successfully established outbound HTTP connections""" return meter.create_histogram( name=HTTP_CLIENT_CONNECTION_DURATION, description="The duration of the successfully established outbound HTTP connections.", unit="s", ) HTTP_CLIENT_OPEN_CONNECTIONS: Final = "http.client.open_connections" """ Number of outbound HTTP connections that are currently active or idle on the client Instrument: updowncounter Unit: {connection} """ def create_http_client_open_connections(meter: Meter) -> UpDownCounter: """Number of outbound HTTP connections that are currently active or idle on the client""" return meter.create_up_down_counter( name=HTTP_CLIENT_OPEN_CONNECTIONS, description="Number of outbound HTTP connections that are currently active or idle on the client.", unit="{connection}", ) HTTP_CLIENT_REQUEST_BODY_SIZE: Final = "http.client.request.body.size" """ Size of HTTP client request bodies Instrument: histogram Unit: By Note: The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. """ def create_http_client_request_body_size(meter: Meter) -> Histogram: """Size of HTTP client request bodies""" return meter.create_histogram( name=HTTP_CLIENT_REQUEST_BODY_SIZE, description="Size of HTTP client request bodies.", unit="By", ) HTTP_CLIENT_REQUEST_DURATION: Final = "http.client.request.duration" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.metrics.http_metrics.HTTP_CLIENT_REQUEST_DURATION`. """ def create_http_client_request_duration(meter: Meter) -> Histogram: """Duration of HTTP client requests""" return meter.create_histogram( name=HTTP_CLIENT_REQUEST_DURATION, description="Duration of HTTP client requests.", unit="s", ) HTTP_CLIENT_RESPONSE_BODY_SIZE: Final = "http.client.response.body.size" """ Size of HTTP client response bodies Instrument: histogram Unit: By Note: The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. 
For requests using transport encoding, this should be the compressed size. """ def create_http_client_response_body_size(meter: Meter) -> Histogram: """Size of HTTP client response bodies""" return meter.create_histogram( name=HTTP_CLIENT_RESPONSE_BODY_SIZE, description="Size of HTTP client response bodies.", unit="By", ) HTTP_SERVER_ACTIVE_REQUESTS: Final = "http.server.active_requests" """ Number of active HTTP server requests Instrument: updowncounter Unit: {request} """ def create_http_server_active_requests(meter: Meter) -> UpDownCounter: """Number of active HTTP server requests""" return meter.create_up_down_counter( name=HTTP_SERVER_ACTIVE_REQUESTS, description="Number of active HTTP server requests.", unit="{request}", ) HTTP_SERVER_REQUEST_BODY_SIZE: Final = "http.server.request.body.size" """ Size of HTTP server request bodies Instrument: histogram Unit: By Note: The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. """ def create_http_server_request_body_size(meter: Meter) -> Histogram: """Size of HTTP server request bodies""" return meter.create_histogram( name=HTTP_SERVER_REQUEST_BODY_SIZE, description="Size of HTTP server request bodies.", unit="By", ) HTTP_SERVER_REQUEST_DURATION: Final = "http.server.request.duration" """ Deprecated in favor of stable :py:const:`opentelemetry.semconv.metrics.http_metrics.HTTP_SERVER_REQUEST_DURATION`. """ def create_http_server_request_duration(meter: Meter) -> Histogram: """Duration of HTTP server requests""" return meter.create_histogram( name=HTTP_SERVER_REQUEST_DURATION, description="Duration of HTTP server requests.", unit="s", ) HTTP_SERVER_RESPONSE_BODY_SIZE: Final = "http.server.response.body.size" """ Size of HTTP server response bodies Instrument: histogram Unit: By Note: The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. """ def create_http_server_response_body_size(meter: Meter) -> Histogram: """Size of HTTP server response bodies""" return meter.create_histogram( name=HTTP_SERVER_RESPONSE_BODY_SIZE, description="Size of HTTP server response bodies.", unit="By", ) hw_metrics.py000066400000000000000000000523171511654350100406150ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
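# Usage sketch (illustrative only; `read_cpu_frequency_hz` is a hypothetical
# platform probe, not part of this package). The observable-gauge factories in
# this module take a sequence of callbacks that yield `Observation`s at each
# collection:
#
#     from opentelemetry.metrics import CallbackOptions, Observation, get_meter_provider
#     from opentelemetry.semconv._incubating.metrics.hw_metrics import create_hw_cpu_speed
#
#     def observe_cpu_speed(options: CallbackOptions):
#         yield Observation(read_cpu_frequency_hz())  # hypothetical probe, in Hz
#
#     meter = get_meter_provider().get_meter("example.hw")
#     create_hw_cpu_speed(meter, callbacks=[observe_cpu_speed])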
from typing import ( Callable, Final, Generator, Iterable, Optional, Sequence, Union, ) from opentelemetry.metrics import ( CallbackOptions, Counter, Meter, ObservableGauge, Observation, UpDownCounter, ) # pylint: disable=invalid-name CallbackT = Union[ Callable[[CallbackOptions], Iterable[Observation]], Generator[Iterable[Observation], CallbackOptions, None], ] HW_BATTERY_CHARGE: Final = "hw.battery.charge" """ Remaining fraction of battery charge Instrument: gauge Unit: 1 """ def create_hw_battery_charge( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Remaining fraction of battery charge""" return meter.create_observable_gauge( name=HW_BATTERY_CHARGE, callbacks=callbacks, description="Remaining fraction of battery charge.", unit="1", ) HW_BATTERY_CHARGE_LIMIT: Final = "hw.battery.charge.limit" """ Lower limit of battery charge fraction to ensure proper operation Instrument: gauge Unit: 1 """ def create_hw_battery_charge_limit( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Lower limit of battery charge fraction to ensure proper operation""" return meter.create_observable_gauge( name=HW_BATTERY_CHARGE_LIMIT, callbacks=callbacks, description="Lower limit of battery charge fraction to ensure proper operation.", unit="1", ) HW_BATTERY_TIME_LEFT: Final = "hw.battery.time_left" """ Time left before battery is completely charged or discharged Instrument: gauge Unit: s """ def create_hw_battery_time_left( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Time left before battery is completely charged or discharged""" return meter.create_observable_gauge( name=HW_BATTERY_TIME_LEFT, callbacks=callbacks, description="Time left before battery is completely charged or discharged.", unit="s", ) HW_CPU_SPEED: Final = "hw.cpu.speed" """ CPU current frequency Instrument: gauge Unit: Hz """ def create_hw_cpu_speed( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """CPU current frequency""" return meter.create_observable_gauge( name=HW_CPU_SPEED, callbacks=callbacks, description="CPU current frequency.", unit="Hz", ) HW_CPU_SPEED_LIMIT: Final = "hw.cpu.speed.limit" """ CPU maximum frequency Instrument: gauge Unit: Hz """ def create_hw_cpu_speed_limit( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """CPU maximum frequency""" return meter.create_observable_gauge( name=HW_CPU_SPEED_LIMIT, callbacks=callbacks, description="CPU maximum frequency.", unit="Hz", ) HW_ENERGY: Final = "hw.energy" """ Energy consumed by the component Instrument: counter Unit: J """ def create_hw_energy(meter: Meter) -> Counter: """Energy consumed by the component""" return meter.create_counter( name=HW_ENERGY, description="Energy consumed by the component.", unit="J", ) HW_ERRORS: Final = "hw.errors" """ Number of errors encountered by the component Instrument: counter Unit: {error} """ def create_hw_errors(meter: Meter) -> Counter: """Number of errors encountered by the component""" return meter.create_counter( name=HW_ERRORS, description="Number of errors encountered by the component.", unit="{error}", ) HW_FAN_SPEED: Final = "hw.fan.speed" """ Fan speed in revolutions per minute Instrument: gauge Unit: rpm """ def create_hw_fan_speed( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Fan speed in revolutions per minute""" return meter.create_observable_gauge( name=HW_FAN_SPEED, callbacks=callbacks, description="Fan speed in revolutions per 
minute.", unit="rpm", ) HW_FAN_SPEED_LIMIT: Final = "hw.fan.speed.limit" """ Speed limit in rpm Instrument: gauge Unit: rpm """ def create_hw_fan_speed_limit( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Speed limit in rpm""" return meter.create_observable_gauge( name=HW_FAN_SPEED_LIMIT, callbacks=callbacks, description="Speed limit in rpm.", unit="rpm", ) HW_FAN_SPEED_RATIO: Final = "hw.fan.speed_ratio" """ Fan speed expressed as a fraction of its maximum speed Instrument: gauge Unit: 1 """ def create_hw_fan_speed_ratio( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Fan speed expressed as a fraction of its maximum speed""" return meter.create_observable_gauge( name=HW_FAN_SPEED_RATIO, callbacks=callbacks, description="Fan speed expressed as a fraction of its maximum speed.", unit="1", ) HW_GPU_IO: Final = "hw.gpu.io" """ Received and transmitted bytes by the GPU Instrument: counter Unit: By """ def create_hw_gpu_io(meter: Meter) -> Counter: """Received and transmitted bytes by the GPU""" return meter.create_counter( name=HW_GPU_IO, description="Received and transmitted bytes by the GPU.", unit="By", ) HW_GPU_MEMORY_LIMIT: Final = "hw.gpu.memory.limit" """ Size of the GPU memory Instrument: updowncounter Unit: By """ def create_hw_gpu_memory_limit(meter: Meter) -> UpDownCounter: """Size of the GPU memory""" return meter.create_up_down_counter( name=HW_GPU_MEMORY_LIMIT, description="Size of the GPU memory.", unit="By", ) HW_GPU_MEMORY_USAGE: Final = "hw.gpu.memory.usage" """ GPU memory used Instrument: updowncounter Unit: By """ def create_hw_gpu_memory_usage(meter: Meter) -> UpDownCounter: """GPU memory used""" return meter.create_up_down_counter( name=HW_GPU_MEMORY_USAGE, description="GPU memory used.", unit="By", ) HW_GPU_MEMORY_UTILIZATION: Final = "hw.gpu.memory.utilization" """ Fraction of GPU memory used Instrument: gauge Unit: 1 """ def create_hw_gpu_memory_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Fraction of GPU memory used""" return meter.create_observable_gauge( name=HW_GPU_MEMORY_UTILIZATION, callbacks=callbacks, description="Fraction of GPU memory used.", unit="1", ) HW_GPU_UTILIZATION: Final = "hw.gpu.utilization" """ Fraction of time spent in a specific task Instrument: gauge Unit: 1 """ def create_hw_gpu_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Fraction of time spent in a specific task""" return meter.create_observable_gauge( name=HW_GPU_UTILIZATION, callbacks=callbacks, description="Fraction of time spent in a specific task.", unit="1", ) HW_HOST_AMBIENT_TEMPERATURE: Final = "hw.host.ambient_temperature" """ Ambient (external) temperature of the physical host Instrument: gauge Unit: Cel """ def create_hw_host_ambient_temperature( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Ambient (external) temperature of the physical host""" return meter.create_observable_gauge( name=HW_HOST_AMBIENT_TEMPERATURE, callbacks=callbacks, description="Ambient (external) temperature of the physical host.", unit="Cel", ) HW_HOST_ENERGY: Final = "hw.host.energy" """ Total energy consumed by the entire physical host, in joules Instrument: counter Unit: J Note: The overall energy usage of a host MUST be reported using the specific `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic `hw.energy` and `hw.power` described in the previous section, to prevent summing up 
overlapping values. """ def create_hw_host_energy(meter: Meter) -> Counter: """Total energy consumed by the entire physical host, in joules""" return meter.create_counter( name=HW_HOST_ENERGY, description="Total energy consumed by the entire physical host, in joules.", unit="J", ) HW_HOST_HEATING_MARGIN: Final = "hw.host.heating_margin" """ By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors Instrument: gauge Unit: Cel """ def create_hw_host_heating_margin( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors""" return meter.create_observable_gauge( name=HW_HOST_HEATING_MARGIN, callbacks=callbacks, description="By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors.", unit="Cel", ) HW_HOST_POWER: Final = "hw.host.power" """ Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred) Instrument: gauge Unit: W Note: The overall energy usage of a host MUST be reported using the specific `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic `hw.energy` and `hw.power` described in the previous section, to prevent summing up overlapping values. """ def create_hw_host_power( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)""" return meter.create_observable_gauge( name=HW_HOST_POWER, callbacks=callbacks, description="Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred).", unit="W", ) HW_LOGICAL_DISK_LIMIT: Final = "hw.logical_disk.limit" """ Size of the logical disk Instrument: updowncounter Unit: By """ def create_hw_logical_disk_limit(meter: Meter) -> UpDownCounter: """Size of the logical disk""" return meter.create_up_down_counter( name=HW_LOGICAL_DISK_LIMIT, description="Size of the logical disk.", unit="By", ) HW_LOGICAL_DISK_USAGE: Final = "hw.logical_disk.usage" """ Logical disk space usage Instrument: updowncounter Unit: By """ def create_hw_logical_disk_usage(meter: Meter) -> UpDownCounter: """Logical disk space usage""" return meter.create_up_down_counter( name=HW_LOGICAL_DISK_USAGE, description="Logical disk space usage.", unit="By", ) HW_LOGICAL_DISK_UTILIZATION: Final = "hw.logical_disk.utilization" """ Logical disk space utilization as a fraction Instrument: gauge Unit: 1 """ def create_hw_logical_disk_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Logical disk space utilization as a fraction""" return meter.create_observable_gauge( name=HW_LOGICAL_DISK_UTILIZATION, callbacks=callbacks, description="Logical disk space utilization as a fraction.", unit="1", ) HW_MEMORY_SIZE: Final = "hw.memory.size" """ Size of the memory module Instrument: updowncounter Unit: By """ def create_hw_memory_size(meter: Meter) -> UpDownCounter: """Size of the memory module""" return meter.create_up_down_counter( name=HW_MEMORY_SIZE, description="Size of the memory module.", unit="By", ) HW_NETWORK_BANDWIDTH_LIMIT: Final = "hw.network.bandwidth.limit" """ Link speed Instrument: updowncounter Unit: By/s """ def create_hw_network_bandwidth_limit(meter: Meter) -> 
UpDownCounter: """Link speed""" return meter.create_up_down_counter( name=HW_NETWORK_BANDWIDTH_LIMIT, description="Link speed.", unit="By/s", ) HW_NETWORK_BANDWIDTH_UTILIZATION: Final = "hw.network.bandwidth.utilization" """ Utilization of the network bandwidth as a fraction Instrument: gauge Unit: 1 """ def create_hw_network_bandwidth_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Utilization of the network bandwidth as a fraction""" return meter.create_observable_gauge( name=HW_NETWORK_BANDWIDTH_UTILIZATION, callbacks=callbacks, description="Utilization of the network bandwidth as a fraction.", unit="1", ) HW_NETWORK_IO: Final = "hw.network.io" """ Received and transmitted network traffic in bytes Instrument: counter Unit: By """ def create_hw_network_io(meter: Meter) -> Counter: """Received and transmitted network traffic in bytes""" return meter.create_counter( name=HW_NETWORK_IO, description="Received and transmitted network traffic in bytes.", unit="By", ) HW_NETWORK_PACKETS: Final = "hw.network.packets" """ Received and transmitted network traffic in packets (or frames) Instrument: counter Unit: {packet} """ def create_hw_network_packets(meter: Meter) -> Counter: """Received and transmitted network traffic in packets (or frames)""" return meter.create_counter( name=HW_NETWORK_PACKETS, description="Received and transmitted network traffic in packets (or frames).", unit="{packet}", ) HW_NETWORK_UP: Final = "hw.network.up" """ Link status: `1` (up) or `0` (down) Instrument: updowncounter Unit: 1 """ def create_hw_network_up(meter: Meter) -> UpDownCounter: """Link status: `1` (up) or `0` (down)""" return meter.create_up_down_counter( name=HW_NETWORK_UP, description="Link status: `1` (up) or `0` (down).", unit="1", ) HW_PHYSICAL_DISK_ENDURANCE_UTILIZATION: Final = ( "hw.physical_disk.endurance_utilization" ) """ Endurance remaining for this SSD disk Instrument: gauge Unit: 1 """ def create_hw_physical_disk_endurance_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Endurance remaining for this SSD disk""" return meter.create_observable_gauge( name=HW_PHYSICAL_DISK_ENDURANCE_UTILIZATION, callbacks=callbacks, description="Endurance remaining for this SSD disk.", unit="1", ) HW_PHYSICAL_DISK_SIZE: Final = "hw.physical_disk.size" """ Size of the disk Instrument: updowncounter Unit: By """ def create_hw_physical_disk_size(meter: Meter) -> UpDownCounter: """Size of the disk""" return meter.create_up_down_counter( name=HW_PHYSICAL_DISK_SIZE, description="Size of the disk.", unit="By", ) HW_PHYSICAL_DISK_SMART: Final = "hw.physical_disk.smart" """ Value of the corresponding [S.M.A.R.T.](https://wikipedia.org/wiki/S.M.A.R.T.) (Self-Monitoring, Analysis, and Reporting Technology) attribute Instrument: gauge Unit: 1 """ def create_hw_physical_disk_smart( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Value of the corresponding [S.M.A.R.T.](https://wikipedia.org/wiki/S.M.A.R.T.) (Self-Monitoring, Analysis, and Reporting Technology) attribute""" return meter.create_observable_gauge( name=HW_PHYSICAL_DISK_SMART, callbacks=callbacks, description="Value of the corresponding [S.M.A.R.T.](https://wikipedia.org/wiki/S.M.A.R.T.) 
(Self-Monitoring, Analysis, and Reporting Technology) attribute.", unit="1", ) HW_POWER: Final = "hw.power" """ Instantaneous power consumed by the component Instrument: gauge Unit: W Note: It is recommended to report `hw.energy` instead of `hw.power` when possible. """ def create_hw_power( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Instantaneous power consumed by the component""" return meter.create_observable_gauge( name=HW_POWER, callbacks=callbacks, description="Instantaneous power consumed by the component.", unit="W", ) HW_POWER_SUPPLY_LIMIT: Final = "hw.power_supply.limit" """ Maximum power output of the power supply Instrument: updowncounter Unit: W """ def create_hw_power_supply_limit(meter: Meter) -> UpDownCounter: """Maximum power output of the power supply""" return meter.create_up_down_counter( name=HW_POWER_SUPPLY_LIMIT, description="Maximum power output of the power supply.", unit="W", ) HW_POWER_SUPPLY_USAGE: Final = "hw.power_supply.usage" """ Current power output of the power supply Instrument: updowncounter Unit: W """ def create_hw_power_supply_usage(meter: Meter) -> UpDownCounter: """Current power output of the power supply""" return meter.create_up_down_counter( name=HW_POWER_SUPPLY_USAGE, description="Current power output of the power supply.", unit="W", ) HW_POWER_SUPPLY_UTILIZATION: Final = "hw.power_supply.utilization" """ Utilization of the power supply as a fraction of its maximum output Instrument: gauge Unit: 1 """ def create_hw_power_supply_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Utilization of the power supply as a fraction of its maximum output""" return meter.create_observable_gauge( name=HW_POWER_SUPPLY_UTILIZATION, callbacks=callbacks, description="Utilization of the power supply as a fraction of its maximum output.", unit="1", ) HW_STATUS: Final = "hw.status" """ Operational status: `1` (true) or `0` (false) for each of the possible states Instrument: updowncounter Unit: 1 Note: `hw.status` is currently specified as an *UpDownCounter* but would ideally be represented using a [*StateSet* as defined in OpenMetrics](https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#stateset). This semantic convention will be updated once *StateSet* is specified in OpenTelemetry. This planned change is not expected to have any consequence on the way users query their timeseries backend to retrieve the values of `hw.status` over time. 
""" def create_hw_status(meter: Meter) -> UpDownCounter: """Operational status: `1` (true) or `0` (false) for each of the possible states""" return meter.create_up_down_counter( name=HW_STATUS, description="Operational status: `1` (true) or `0` (false) for each of the possible states.", unit="1", ) HW_TAPE_DRIVE_OPERATIONS: Final = "hw.tape_drive.operations" """ Operations performed by the tape drive Instrument: counter Unit: {operation} """ def create_hw_tape_drive_operations(meter: Meter) -> Counter: """Operations performed by the tape drive""" return meter.create_counter( name=HW_TAPE_DRIVE_OPERATIONS, description="Operations performed by the tape drive.", unit="{operation}", ) HW_TEMPERATURE: Final = "hw.temperature" """ Temperature in degrees Celsius Instrument: gauge Unit: Cel """ def create_hw_temperature( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Temperature in degrees Celsius""" return meter.create_observable_gauge( name=HW_TEMPERATURE, callbacks=callbacks, description="Temperature in degrees Celsius.", unit="Cel", ) HW_TEMPERATURE_LIMIT: Final = "hw.temperature.limit" """ Temperature limit in degrees Celsius Instrument: gauge Unit: Cel """ def create_hw_temperature_limit( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Temperature limit in degrees Celsius""" return meter.create_observable_gauge( name=HW_TEMPERATURE_LIMIT, callbacks=callbacks, description="Temperature limit in degrees Celsius.", unit="Cel", ) HW_VOLTAGE: Final = "hw.voltage" """ Voltage measured by the sensor Instrument: gauge Unit: V """ def create_hw_voltage( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Voltage measured by the sensor""" return meter.create_observable_gauge( name=HW_VOLTAGE, callbacks=callbacks, description="Voltage measured by the sensor.", unit="V", ) HW_VOLTAGE_LIMIT: Final = "hw.voltage.limit" """ Voltage limit in Volts Instrument: gauge Unit: V """ def create_hw_voltage_limit( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Voltage limit in Volts""" return meter.create_observable_gauge( name=HW_VOLTAGE_LIMIT, callbacks=callbacks, description="Voltage limit in Volts.", unit="V", ) HW_VOLTAGE_NOMINAL: Final = "hw.voltage.nominal" """ Nominal (expected) voltage Instrument: gauge Unit: V """ def create_hw_voltage_nominal( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Nominal (expected) voltage""" return meter.create_observable_gauge( name=HW_VOLTAGE_NOMINAL, callbacks=callbacks, description="Nominal (expected) voltage.", unit="V", ) k8s_metrics.py000066400000000000000000002636111511654350100407050ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import ( Callable, Final, Generator, Iterable, Optional, Sequence, Union, ) from opentelemetry.metrics import ( CallbackOptions, Counter, Meter, ObservableGauge, Observation, UpDownCounter, ) # pylint: disable=invalid-name CallbackT = Union[ Callable[[CallbackOptions], Iterable[Observation]], Generator[Iterable[Observation], CallbackOptions, None], ] K8S_CONTAINER_CPU_LIMIT: Final = "k8s.container.cpu.limit" """ Maximum CPU resource limit set for the container Instrument: updowncounter Unit: {cpu} Note: See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details. """ def create_k8s_container_cpu_limit(meter: Meter) -> UpDownCounter: """Maximum CPU resource limit set for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_CPU_LIMIT, description="Maximum CPU resource limit set for the container.", unit="{cpu}", ) K8S_CONTAINER_CPU_LIMIT_UTILIZATION: Final = ( "k8s.container.cpu.limit_utilization" ) """ The ratio of container CPU usage to its CPU limit Instrument: gauge Unit: 1 Note: The value range is [0.0,1.0]. A value of 1.0 means the container is using 100% of its CPU limit. If the CPU limit is not set, this metric SHOULD NOT be emitted for that container. """ def create_k8s_container_cpu_limit_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The ratio of container CPU usage to its CPU limit""" return meter.create_observable_gauge( name=K8S_CONTAINER_CPU_LIMIT_UTILIZATION, callbacks=callbacks, description="The ratio of container CPU usage to its CPU limit.", unit="1", ) K8S_CONTAINER_CPU_REQUEST: Final = "k8s.container.cpu.request" """ CPU resource requested for the container Instrument: updowncounter Unit: {cpu} Note: See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details. """ def create_k8s_container_cpu_request(meter: Meter) -> UpDownCounter: """CPU resource requested for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_CPU_REQUEST, description="CPU resource requested for the container.", unit="{cpu}", ) K8S_CONTAINER_CPU_REQUEST_UTILIZATION: Final = ( "k8s.container.cpu.request_utilization" ) """ The ratio of container CPU usage to its CPU request Instrument: gauge Unit: 1 """ def create_k8s_container_cpu_request_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The ratio of container CPU usage to its CPU request""" return meter.create_observable_gauge( name=K8S_CONTAINER_CPU_REQUEST_UTILIZATION, callbacks=callbacks, description="The ratio of container CPU usage to its CPU request.", unit="1", ) K8S_CONTAINER_EPHEMERAL_STORAGE_LIMIT: Final = ( "k8s.container.ephemeral_storage.limit" ) """ Maximum ephemeral storage resource limit set for the container Instrument: updowncounter Unit: By Note: See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details. 
""" def create_k8s_container_ephemeral_storage_limit( meter: Meter, ) -> UpDownCounter: """Maximum ephemeral storage resource limit set for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_EPHEMERAL_STORAGE_LIMIT, description="Maximum ephemeral storage resource limit set for the container.", unit="By", ) K8S_CONTAINER_EPHEMERAL_STORAGE_REQUEST: Final = ( "k8s.container.ephemeral_storage.request" ) """ Ephemeral storage resource requested for the container Instrument: updowncounter Unit: By Note: See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details. """ def create_k8s_container_ephemeral_storage_request( meter: Meter, ) -> UpDownCounter: """Ephemeral storage resource requested for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_EPHEMERAL_STORAGE_REQUEST, description="Ephemeral storage resource requested for the container.", unit="By", ) K8S_CONTAINER_MEMORY_LIMIT: Final = "k8s.container.memory.limit" """ Maximum memory resource limit set for the container Instrument: updowncounter Unit: By Note: See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details. """ def create_k8s_container_memory_limit(meter: Meter) -> UpDownCounter: """Maximum memory resource limit set for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_MEMORY_LIMIT, description="Maximum memory resource limit set for the container.", unit="By", ) K8S_CONTAINER_MEMORY_REQUEST: Final = "k8s.container.memory.request" """ Memory resource requested for the container Instrument: updowncounter Unit: By Note: See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details. """ def create_k8s_container_memory_request(meter: Meter) -> UpDownCounter: """Memory resource requested for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_MEMORY_REQUEST, description="Memory resource requested for the container.", unit="By", ) K8S_CONTAINER_READY: Final = "k8s.container.ready" """ Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready) Instrument: updowncounter Unit: {container} Note: This metric SHOULD reflect the value of the `ready` field in the [K8s ContainerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatus-v1-core). """ def create_k8s_container_ready(meter: Meter) -> UpDownCounter: """Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready)""" return meter.create_up_down_counter( name=K8S_CONTAINER_READY, description="Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready).", unit="{container}", ) K8S_CONTAINER_RESTART_COUNT: Final = "k8s.container.restart.count" """ Describes how many times the container has restarted (since the last counter reset) Instrument: updowncounter Unit: {restart} Note: This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. 
It is best not to depend too much on the exact value; look at it as either == 0, meaning there were no restarts in the recent past, or > 0, meaning there were restarts in the recent past, and do not try to analyze the value beyond that. """ def create_k8s_container_restart_count(meter: Meter) -> UpDownCounter: """Describes how many times the container has restarted (since the last counter reset)""" return meter.create_up_down_counter( name=K8S_CONTAINER_RESTART_COUNT, description="Describes how many times the container has restarted (since the last counter reset).", unit="{restart}", ) K8S_CONTAINER_STATUS_REASON: Final = "k8s.container.status.reason" """ Describes the number of K8s containers that are currently in a state for a given reason Instrument: updowncounter Unit: {container} Note: All possible container state reasons will be reported at each time interval to avoid missing metrics. Only the value corresponding to the current state reason will be non-zero. """ def create_k8s_container_status_reason(meter: Meter) -> UpDownCounter: """Describes the number of K8s containers that are currently in a state for a given reason""" return meter.create_up_down_counter( name=K8S_CONTAINER_STATUS_REASON, description="Describes the number of K8s containers that are currently in a state for a given reason.", unit="{container}", ) K8S_CONTAINER_STATUS_STATE: Final = "k8s.container.status.state" """ Describes the number of K8s containers that are currently in a given state Instrument: updowncounter Unit: {container} Note: All possible container states will be reported at each time interval to avoid missing metrics. Only the value corresponding to the current state will be non-zero. """ def create_k8s_container_status_state(meter: Meter) -> UpDownCounter: """Describes the number of K8s containers that are currently in a given state""" return meter.create_up_down_counter( name=K8S_CONTAINER_STATUS_STATE, description="Describes the number of K8s containers that are currently in a given state.", unit="{container}", ) K8S_CONTAINER_STORAGE_LIMIT: Final = "k8s.container.storage.limit" """ Maximum storage resource limit set for the container Instrument: updowncounter Unit: By Note: See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details. """ def create_k8s_container_storage_limit(meter: Meter) -> UpDownCounter: """Maximum storage resource limit set for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_STORAGE_LIMIT, description="Maximum storage resource limit set for the container.", unit="By", ) K8S_CONTAINER_STORAGE_REQUEST: Final = "k8s.container.storage.request" """ Storage resource requested for the container Instrument: updowncounter Unit: By Note: See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core for details. """ def create_k8s_container_storage_request(meter: Meter) -> UpDownCounter: """Storage resource requested for the container""" return meter.create_up_down_counter( name=K8S_CONTAINER_STORAGE_REQUEST, description="Storage resource requested for the container.", unit="By", ) K8S_CRONJOB_ACTIVE_JOBS: Final = "k8s.cronjob.active_jobs" """ Deprecated: Replaced by `k8s.cronjob.job.active`. 
""" def create_k8s_cronjob_active_jobs(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.cronjob.job.active` instead""" return meter.create_up_down_counter( name=K8S_CRONJOB_ACTIVE_JOBS, description="Deprecated, use `k8s.cronjob.job.active` instead.", unit="{job}", ) K8S_CRONJOB_JOB_ACTIVE: Final = "k8s.cronjob.job.active" """ The number of actively running jobs for a cronjob Instrument: updowncounter Unit: {job} Note: This metric aligns with the `active` field of the [K8s CronJobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch). """ def create_k8s_cronjob_job_active(meter: Meter) -> UpDownCounter: """The number of actively running jobs for a cronjob""" return meter.create_up_down_counter( name=K8S_CRONJOB_JOB_ACTIVE, description="The number of actively running jobs for a cronjob.", unit="{job}", ) K8S_DAEMONSET_CURRENT_SCHEDULED_NODES: Final = ( "k8s.daemonset.current_scheduled_nodes" ) """ Deprecated: Replaced by `k8s.daemonset.node.current_scheduled`. """ def create_k8s_daemonset_current_scheduled_nodes( meter: Meter, ) -> UpDownCounter: """Deprecated, use `k8s.daemonset.node.current_scheduled` instead""" return meter.create_up_down_counter( name=K8S_DAEMONSET_CURRENT_SCHEDULED_NODES, description="Deprecated, use `k8s.daemonset.node.current_scheduled` instead.", unit="{node}", ) K8S_DAEMONSET_DESIRED_SCHEDULED_NODES: Final = ( "k8s.daemonset.desired_scheduled_nodes" ) """ Deprecated: Replaced by `k8s.daemonset.node.desired_scheduled`. """ def create_k8s_daemonset_desired_scheduled_nodes( meter: Meter, ) -> UpDownCounter: """Deprecated, use `k8s.daemonset.node.desired_scheduled` instead""" return meter.create_up_down_counter( name=K8S_DAEMONSET_DESIRED_SCHEDULED_NODES, description="Deprecated, use `k8s.daemonset.node.desired_scheduled` instead.", unit="{node}", ) K8S_DAEMONSET_MISSCHEDULED_NODES: Final = "k8s.daemonset.misscheduled_nodes" """ Deprecated: Replaced by `k8s.daemonset.node.misscheduled`. """ def create_k8s_daemonset_misscheduled_nodes(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.daemonset.node.misscheduled` instead""" return meter.create_up_down_counter( name=K8S_DAEMONSET_MISSCHEDULED_NODES, description="Deprecated, use `k8s.daemonset.node.misscheduled` instead.", unit="{node}", ) K8S_DAEMONSET_NODE_CURRENT_SCHEDULED: Final = ( "k8s.daemonset.node.current_scheduled" ) """ Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod Instrument: updowncounter Unit: {node} Note: This metric aligns with the `currentNumberScheduled` field of the [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). """ def create_k8s_daemonset_node_current_scheduled(meter: Meter) -> UpDownCounter: """Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod""" return meter.create_up_down_counter( name=K8S_DAEMONSET_NODE_CURRENT_SCHEDULED, description="Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod.", unit="{node}", ) K8S_DAEMONSET_NODE_DESIRED_SCHEDULED: Final = ( "k8s.daemonset.node.desired_scheduled" ) """ Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) Instrument: updowncounter Unit: {node} Note: This metric aligns with the `desiredNumberScheduled` field of the [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). 
""" def create_k8s_daemonset_node_desired_scheduled(meter: Meter) -> UpDownCounter: """Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)""" return meter.create_up_down_counter( name=K8S_DAEMONSET_NODE_DESIRED_SCHEDULED, description="Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod).", unit="{node}", ) K8S_DAEMONSET_NODE_MISSCHEDULED: Final = "k8s.daemonset.node.misscheduled" """ Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod Instrument: updowncounter Unit: {node} Note: This metric aligns with the `numberMisscheduled` field of the [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). """ def create_k8s_daemonset_node_misscheduled(meter: Meter) -> UpDownCounter: """Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod""" return meter.create_up_down_counter( name=K8S_DAEMONSET_NODE_MISSCHEDULED, description="Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod.", unit="{node}", ) K8S_DAEMONSET_NODE_READY: Final = "k8s.daemonset.node.ready" """ Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready Instrument: updowncounter Unit: {node} Note: This metric aligns with the `numberReady` field of the [K8s DaemonSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps). """ def create_k8s_daemonset_node_ready(meter: Meter) -> UpDownCounter: """Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready""" return meter.create_up_down_counter( name=K8S_DAEMONSET_NODE_READY, description="Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", unit="{node}", ) K8S_DAEMONSET_READY_NODES: Final = "k8s.daemonset.ready_nodes" """ Deprecated: Replaced by `k8s.daemonset.node.ready`. """ def create_k8s_daemonset_ready_nodes(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.daemonset.node.ready` instead""" return meter.create_up_down_counter( name=K8S_DAEMONSET_READY_NODES, description="Deprecated, use `k8s.daemonset.node.ready` instead.", unit="{node}", ) K8S_DEPLOYMENT_AVAILABLE_PODS: Final = "k8s.deployment.available_pods" """ Deprecated: Replaced by `k8s.deployment.pod.available`. """ def create_k8s_deployment_available_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.deployment.pod.available` instead""" return meter.create_up_down_counter( name=K8S_DEPLOYMENT_AVAILABLE_PODS, description="Deprecated, use `k8s.deployment.pod.available` instead.", unit="{pod}", ) K8S_DEPLOYMENT_DESIRED_PODS: Final = "k8s.deployment.desired_pods" """ Deprecated: Replaced by `k8s.deployment.pod.desired`. 
""" def create_k8s_deployment_desired_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.deployment.pod.desired` instead""" return meter.create_up_down_counter( name=K8S_DEPLOYMENT_DESIRED_PODS, description="Deprecated, use `k8s.deployment.pod.desired` instead.", unit="{pod}", ) K8S_DEPLOYMENT_POD_AVAILABLE: Final = "k8s.deployment.pod.available" """ Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `availableReplicas` field of the [K8s DeploymentStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps). """ def create_k8s_deployment_pod_available(meter: Meter) -> UpDownCounter: """Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment""" return meter.create_up_down_counter( name=K8S_DEPLOYMENT_POD_AVAILABLE, description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment.", unit="{pod}", ) K8S_DEPLOYMENT_POD_DESIRED: Final = "k8s.deployment.pod.desired" """ Number of desired replica pods in this deployment Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `replicas` field of the [K8s DeploymentSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps). """ def create_k8s_deployment_pod_desired(meter: Meter) -> UpDownCounter: """Number of desired replica pods in this deployment""" return meter.create_up_down_counter( name=K8S_DEPLOYMENT_POD_DESIRED, description="Number of desired replica pods in this deployment.", unit="{pod}", ) K8S_HPA_CURRENT_PODS: Final = "k8s.hpa.current_pods" """ Deprecated: Replaced by `k8s.hpa.pod.current`. """ def create_k8s_hpa_current_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.hpa.pod.current` instead""" return meter.create_up_down_counter( name=K8S_HPA_CURRENT_PODS, description="Deprecated, use `k8s.hpa.pod.current` instead.", unit="{pod}", ) K8S_HPA_DESIRED_PODS: Final = "k8s.hpa.desired_pods" """ Deprecated: Replaced by `k8s.hpa.pod.desired`. """ def create_k8s_hpa_desired_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.hpa.pod.desired` instead""" return meter.create_up_down_counter( name=K8S_HPA_DESIRED_PODS, description="Deprecated, use `k8s.hpa.pod.desired` instead.", unit="{pod}", ) K8S_HPA_MAX_PODS: Final = "k8s.hpa.max_pods" """ Deprecated: Replaced by `k8s.hpa.pod.max`. """ def create_k8s_hpa_max_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.hpa.pod.max` instead""" return meter.create_up_down_counter( name=K8S_HPA_MAX_PODS, description="Deprecated, use `k8s.hpa.pod.max` instead.", unit="{pod}", ) K8S_HPA_METRIC_TARGET_CPU_AVERAGE_UTILIZATION: Final = ( "k8s.hpa.metric.target.cpu.average_utilization" ) """ Target average utilization, in percentage, for CPU resource in HPA config Instrument: gauge Unit: 1 Note: This metric aligns with the `averageUtilization` field of the [K8s HPA MetricTarget](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling). If the type of the metric is [`ContainerResource`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis), the `k8s.container.name` attribute MUST be set to identify the specific container within the pod to which the metric applies. 
""" def create_k8s_hpa_metric_target_cpu_average_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Target average utilization, in percentage, for CPU resource in HPA config""" return meter.create_observable_gauge( name=K8S_HPA_METRIC_TARGET_CPU_AVERAGE_UTILIZATION, callbacks=callbacks, description="Target average utilization, in percentage, for CPU resource in HPA config.", unit="1", ) K8S_HPA_METRIC_TARGET_CPU_AVERAGE_VALUE: Final = ( "k8s.hpa.metric.target.cpu.average_value" ) """ Target average value for CPU resource in HPA config Instrument: gauge Unit: {cpu} Note: This metric aligns with the `averageValue` field of the [K8s HPA MetricTarget](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling). If the type of the metric is [`ContainerResource`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis), the `k8s.container.name` attribute MUST be set to identify the specific container within the pod to which the metric applies. """ def create_k8s_hpa_metric_target_cpu_average_value( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Target average value for CPU resource in HPA config""" return meter.create_observable_gauge( name=K8S_HPA_METRIC_TARGET_CPU_AVERAGE_VALUE, callbacks=callbacks, description="Target average value for CPU resource in HPA config.", unit="{cpu}", ) K8S_HPA_METRIC_TARGET_CPU_VALUE: Final = "k8s.hpa.metric.target.cpu.value" """ Target value for CPU resource in HPA config Instrument: gauge Unit: {cpu} Note: This metric aligns with the `value` field of the [K8s HPA MetricTarget](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling). If the type of the metric is [`ContainerResource`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis), the `k8s.container.name` attribute MUST be set to identify the specific container within the pod to which the metric applies. """ def create_k8s_hpa_metric_target_cpu_value( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Target value for CPU resource in HPA config""" return meter.create_observable_gauge( name=K8S_HPA_METRIC_TARGET_CPU_VALUE, callbacks=callbacks, description="Target value for CPU resource in HPA config.", unit="{cpu}", ) K8S_HPA_MIN_PODS: Final = "k8s.hpa.min_pods" """ Deprecated: Replaced by `k8s.hpa.pod.min`. """ def create_k8s_hpa_min_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.hpa.pod.min` instead""" return meter.create_up_down_counter( name=K8S_HPA_MIN_PODS, description="Deprecated, use `k8s.hpa.pod.min` instead.", unit="{pod}", ) K8S_HPA_POD_CURRENT: Final = "k8s.hpa.pod.current" """ Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `currentReplicas` field of the [K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling). 
""" def create_k8s_hpa_pod_current(meter: Meter) -> UpDownCounter: """Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler""" return meter.create_up_down_counter( name=K8S_HPA_POD_CURRENT, description="Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler.", unit="{pod}", ) K8S_HPA_POD_DESIRED: Final = "k8s.hpa.pod.desired" """ Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `desiredReplicas` field of the [K8s HorizontalPodAutoscalerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling). """ def create_k8s_hpa_pod_desired(meter: Meter) -> UpDownCounter: """Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler""" return meter.create_up_down_counter( name=K8S_HPA_POD_DESIRED, description="Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler.", unit="{pod}", ) K8S_HPA_POD_MAX: Final = "k8s.hpa.pod.max" """ The upper limit for the number of replica pods to which the autoscaler can scale up Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `maxReplicas` field of the [K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling). """ def create_k8s_hpa_pod_max(meter: Meter) -> UpDownCounter: """The upper limit for the number of replica pods to which the autoscaler can scale up""" return meter.create_up_down_counter( name=K8S_HPA_POD_MAX, description="The upper limit for the number of replica pods to which the autoscaler can scale up.", unit="{pod}", ) K8S_HPA_POD_MIN: Final = "k8s.hpa.pod.min" """ The lower limit for the number of replica pods to which the autoscaler can scale down Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `minReplicas` field of the [K8s HorizontalPodAutoscalerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling). """ def create_k8s_hpa_pod_min(meter: Meter) -> UpDownCounter: """The lower limit for the number of replica pods to which the autoscaler can scale down""" return meter.create_up_down_counter( name=K8S_HPA_POD_MIN, description="The lower limit for the number of replica pods to which the autoscaler can scale down.", unit="{pod}", ) K8S_JOB_ACTIVE_PODS: Final = "k8s.job.active_pods" """ Deprecated: Replaced by `k8s.job.pod.active`. """ def create_k8s_job_active_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.job.pod.active` instead""" return meter.create_up_down_counter( name=K8S_JOB_ACTIVE_PODS, description="Deprecated, use `k8s.job.pod.active` instead.", unit="{pod}", ) K8S_JOB_DESIRED_SUCCESSFUL_PODS: Final = "k8s.job.desired_successful_pods" """ Deprecated: Replaced by `k8s.job.pod.desired_successful`. """ def create_k8s_job_desired_successful_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.job.pod.desired_successful` instead""" return meter.create_up_down_counter( name=K8S_JOB_DESIRED_SUCCESSFUL_PODS, description="Deprecated, use `k8s.job.pod.desired_successful` instead.", unit="{pod}", ) K8S_JOB_FAILED_PODS: Final = "k8s.job.failed_pods" """ Deprecated: Replaced by `k8s.job.pod.failed`. 
""" def create_k8s_job_failed_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.job.pod.failed` instead""" return meter.create_up_down_counter( name=K8S_JOB_FAILED_PODS, description="Deprecated, use `k8s.job.pod.failed` instead.", unit="{pod}", ) K8S_JOB_MAX_PARALLEL_PODS: Final = "k8s.job.max_parallel_pods" """ Deprecated: Replaced by `k8s.job.pod.max_parallel`. """ def create_k8s_job_max_parallel_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.job.pod.max_parallel` instead""" return meter.create_up_down_counter( name=K8S_JOB_MAX_PARALLEL_PODS, description="Deprecated, use `k8s.job.pod.max_parallel` instead.", unit="{pod}", ) K8S_JOB_POD_ACTIVE: Final = "k8s.job.pod.active" """ The number of pending and actively running pods for a job Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `active` field of the [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). """ def create_k8s_job_pod_active(meter: Meter) -> UpDownCounter: """The number of pending and actively running pods for a job""" return meter.create_up_down_counter( name=K8S_JOB_POD_ACTIVE, description="The number of pending and actively running pods for a job.", unit="{pod}", ) K8S_JOB_POD_DESIRED_SUCCESSFUL: Final = "k8s.job.pod.desired_successful" """ The desired number of successfully finished pods the job should be run with Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `completions` field of the [K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch). """ def create_k8s_job_pod_desired_successful(meter: Meter) -> UpDownCounter: """The desired number of successfully finished pods the job should be run with""" return meter.create_up_down_counter( name=K8S_JOB_POD_DESIRED_SUCCESSFUL, description="The desired number of successfully finished pods the job should be run with.", unit="{pod}", ) K8S_JOB_POD_FAILED: Final = "k8s.job.pod.failed" """ The number of pods which reached phase Failed for a job Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `failed` field of the [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). """ def create_k8s_job_pod_failed(meter: Meter) -> UpDownCounter: """The number of pods which reached phase Failed for a job""" return meter.create_up_down_counter( name=K8S_JOB_POD_FAILED, description="The number of pods which reached phase Failed for a job.", unit="{pod}", ) K8S_JOB_POD_MAX_PARALLEL: Final = "k8s.job.pod.max_parallel" """ The max desired number of pods the job should run at any given time Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `parallelism` field of the [K8s JobSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch). """ def create_k8s_job_pod_max_parallel(meter: Meter) -> UpDownCounter: """The max desired number of pods the job should run at any given time""" return meter.create_up_down_counter( name=K8S_JOB_POD_MAX_PARALLEL, description="The max desired number of pods the job should run at any given time.", unit="{pod}", ) K8S_JOB_POD_SUCCESSFUL: Final = "k8s.job.pod.successful" """ The number of pods which reached phase Succeeded for a job Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `succeeded` field of the [K8s JobStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch). 
""" def create_k8s_job_pod_successful(meter: Meter) -> UpDownCounter: """The number of pods which reached phase Succeeded for a job""" return meter.create_up_down_counter( name=K8S_JOB_POD_SUCCESSFUL, description="The number of pods which reached phase Succeeded for a job.", unit="{pod}", ) K8S_JOB_SUCCESSFUL_PODS: Final = "k8s.job.successful_pods" """ Deprecated: Replaced by `k8s.job.pod.successful`. """ def create_k8s_job_successful_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.job.pod.successful` instead""" return meter.create_up_down_counter( name=K8S_JOB_SUCCESSFUL_PODS, description="Deprecated, use `k8s.job.pod.successful` instead.", unit="{pod}", ) K8S_NAMESPACE_PHASE: Final = "k8s.namespace.phase" """ Describes number of K8s namespaces that are currently in a given phase Instrument: updowncounter Unit: {namespace} """ def create_k8s_namespace_phase(meter: Meter) -> UpDownCounter: """Describes number of K8s namespaces that are currently in a given phase""" return meter.create_up_down_counter( name=K8S_NAMESPACE_PHASE, description="Describes number of K8s namespaces that are currently in a given phase.", unit="{namespace}", ) K8S_NODE_ALLOCATABLE_CPU: Final = "k8s.node.allocatable.cpu" """ Deprecated: Replaced by `k8s.node.cpu.allocatable`. """ def create_k8s_node_allocatable_cpu(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.node.cpu.allocatable` instead""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_CPU, description="Deprecated, use `k8s.node.cpu.allocatable` instead.", unit="{cpu}", ) K8S_NODE_ALLOCATABLE_EPHEMERAL_STORAGE: Final = ( "k8s.node.allocatable.ephemeral_storage" ) """ Deprecated: Replaced by `k8s.node.ephemeral_storage.allocatable`. """ def create_k8s_node_allocatable_ephemeral_storage( meter: Meter, ) -> UpDownCounter: """Deprecated, use `k8s.node.ephemeral_storage.allocatable` instead""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_EPHEMERAL_STORAGE, description="Deprecated, use `k8s.node.ephemeral_storage.allocatable` instead.", unit="By", ) K8S_NODE_ALLOCATABLE_MEMORY: Final = "k8s.node.allocatable.memory" """ Deprecated: Replaced by `k8s.node.memory.allocatable`. """ def create_k8s_node_allocatable_memory(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.node.memory.allocatable` instead""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_MEMORY, description="Deprecated, use `k8s.node.memory.allocatable` instead.", unit="By", ) K8S_NODE_ALLOCATABLE_PODS: Final = "k8s.node.allocatable.pods" """ Deprecated: Replaced by `k8s.node.pod.allocatable`. """ def create_k8s_node_allocatable_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.node.pod.allocatable` instead""" return meter.create_up_down_counter( name=K8S_NODE_ALLOCATABLE_PODS, description="Deprecated, use `k8s.node.pod.allocatable` instead.", unit="{pod}", ) K8S_NODE_CONDITION_STATUS: Final = "k8s.node.condition.status" """ Describes the condition of a particular Node Instrument: updowncounter Unit: {node} Note: All possible node condition pairs (type and status) will be reported at each time interval to avoid missing metrics. Condition pairs corresponding to the current conditions' statuses will be non-zero. 
""" def create_k8s_node_condition_status(meter: Meter) -> UpDownCounter: """Describes the condition of a particular Node""" return meter.create_up_down_counter( name=K8S_NODE_CONDITION_STATUS, description="Describes the condition of a particular Node.", unit="{node}", ) K8S_NODE_CPU_ALLOCATABLE: Final = "k8s.node.cpu.allocatable" """ Amount of cpu allocatable on the node Instrument: updowncounter Unit: {cpu} """ def create_k8s_node_cpu_allocatable(meter: Meter) -> UpDownCounter: """Amount of cpu allocatable on the node""" return meter.create_up_down_counter( name=K8S_NODE_CPU_ALLOCATABLE, description="Amount of cpu allocatable on the node.", unit="{cpu}", ) K8S_NODE_CPU_TIME: Final = "k8s.node.cpu.time" """ Total CPU time consumed Instrument: counter Unit: s Note: Total CPU time consumed by the specific Node on all available CPU cores. """ def create_k8s_node_cpu_time(meter: Meter) -> Counter: """Total CPU time consumed""" return meter.create_counter( name=K8S_NODE_CPU_TIME, description="Total CPU time consumed.", unit="s", ) K8S_NODE_CPU_USAGE: Final = "k8s.node.cpu.usage" """ Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs Instrument: gauge Unit: {cpu} Note: CPU usage of the specific Node on all available CPU cores, averaged over the sample window. """ def create_k8s_node_cpu_usage( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs""" return meter.create_observable_gauge( name=K8S_NODE_CPU_USAGE, callbacks=callbacks, description="Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs.", unit="{cpu}", ) K8S_NODE_EPHEMERAL_STORAGE_ALLOCATABLE: Final = ( "k8s.node.ephemeral_storage.allocatable" ) """ Amount of ephemeral-storage allocatable on the node Instrument: updowncounter Unit: By """ def create_k8s_node_ephemeral_storage_allocatable( meter: Meter, ) -> UpDownCounter: """Amount of ephemeral-storage allocatable on the node""" return meter.create_up_down_counter( name=K8S_NODE_EPHEMERAL_STORAGE_ALLOCATABLE, description="Amount of ephemeral-storage allocatable on the node.", unit="By", ) K8S_NODE_FILESYSTEM_AVAILABLE: Final = "k8s.node.filesystem.available" """ Node filesystem available bytes Instrument: updowncounter Unit: By Note: This metric is derived from the [FsStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field of the [NodeStats.Fs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. """ def create_k8s_node_filesystem_available(meter: Meter) -> UpDownCounter: """Node filesystem available bytes""" return meter.create_up_down_counter( name=K8S_NODE_FILESYSTEM_AVAILABLE, description="Node filesystem available bytes.", unit="By", ) K8S_NODE_FILESYSTEM_CAPACITY: Final = "k8s.node.filesystem.capacity" """ Node filesystem capacity Instrument: updowncounter Unit: By Note: This metric is derived from the [FsStats.CapacityBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field of the [NodeStats.Fs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. 
""" def create_k8s_node_filesystem_capacity(meter: Meter) -> UpDownCounter: """Node filesystem capacity""" return meter.create_up_down_counter( name=K8S_NODE_FILESYSTEM_CAPACITY, description="Node filesystem capacity.", unit="By", ) K8S_NODE_FILESYSTEM_USAGE: Final = "k8s.node.filesystem.usage" """ Node filesystem usage Instrument: updowncounter Unit: By Note: This may not equal capacity - available. This metric is derived from the [FsStats.UsedBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field of the [NodeStats.Fs](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. """ def create_k8s_node_filesystem_usage(meter: Meter) -> UpDownCounter: """Node filesystem usage""" return meter.create_up_down_counter( name=K8S_NODE_FILESYSTEM_USAGE, description="Node filesystem usage.", unit="By", ) K8S_NODE_MEMORY_ALLOCATABLE: Final = "k8s.node.memory.allocatable" """ Amount of memory allocatable on the node Instrument: updowncounter Unit: By """ def create_k8s_node_memory_allocatable(meter: Meter) -> UpDownCounter: """Amount of memory allocatable on the node""" return meter.create_up_down_counter( name=K8S_NODE_MEMORY_ALLOCATABLE, description="Amount of memory allocatable on the node.", unit="By", ) K8S_NODE_MEMORY_AVAILABLE: Final = "k8s.node.memory.available" """ Node memory available Instrument: updowncounter Unit: By Note: Available memory for use. This is defined as the memory limit - workingSetBytes. If memory limit is undefined, the available bytes is omitted. This metric is derived from the [MemoryStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [NodeStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. """ def create_k8s_node_memory_available(meter: Meter) -> UpDownCounter: """Node memory available""" return meter.create_up_down_counter( name=K8S_NODE_MEMORY_AVAILABLE, description="Node memory available.", unit="By", ) K8S_NODE_MEMORY_PAGING_FAULTS: Final = "k8s.node.memory.paging.faults" """ Node memory paging faults Instrument: counter Unit: {fault} Note: Cumulative number of major/minor page faults. This metric is derived from the [MemoryStats.PageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) and [MemoryStats.MajorPageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) fields of the [NodeStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. """ def create_k8s_node_memory_paging_faults(meter: Meter) -> Counter: """Node memory paging faults""" return meter.create_counter( name=K8S_NODE_MEMORY_PAGING_FAULTS, description="Node memory paging faults.", unit="{fault}", ) K8S_NODE_MEMORY_RSS: Final = "k8s.node.memory.rss" """ Node memory RSS Instrument: updowncounter Unit: By Note: The amount of anonymous and swap cache memory (includes transparent hugepages). This metric is derived from the [MemoryStats.RSSBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [NodeStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. 
""" def create_k8s_node_memory_rss(meter: Meter) -> UpDownCounter: """Node memory RSS""" return meter.create_up_down_counter( name=K8S_NODE_MEMORY_RSS, description="Node memory RSS.", unit="By", ) K8S_NODE_MEMORY_USAGE: Final = "k8s.node.memory.usage" """ Memory usage of the Node Instrument: gauge Unit: By Note: Total memory usage of the Node. """ def create_k8s_node_memory_usage( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Memory usage of the Node""" return meter.create_observable_gauge( name=K8S_NODE_MEMORY_USAGE, callbacks=callbacks, description="Memory usage of the Node.", unit="By", ) K8S_NODE_MEMORY_WORKING_SET: Final = "k8s.node.memory.working_set" """ Node memory working set Instrument: updowncounter Unit: By Note: The amount of working set memory. This includes recently accessed memory, dirty memory, and kernel memory. WorkingSetBytes is <= UsageBytes. This metric is derived from the [MemoryStats.WorkingSetBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [NodeStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#NodeStats) of the Kubelet's stats API. """ def create_k8s_node_memory_working_set(meter: Meter) -> UpDownCounter: """Node memory working set""" return meter.create_up_down_counter( name=K8S_NODE_MEMORY_WORKING_SET, description="Node memory working set.", unit="By", ) K8S_NODE_NETWORK_ERRORS: Final = "k8s.node.network.errors" """ Node network errors Instrument: counter Unit: {error} """ def create_k8s_node_network_errors(meter: Meter) -> Counter: """Node network errors""" return meter.create_counter( name=K8S_NODE_NETWORK_ERRORS, description="Node network errors.", unit="{error}", ) K8S_NODE_NETWORK_IO: Final = "k8s.node.network.io" """ Network bytes for the Node Instrument: counter Unit: By """ def create_k8s_node_network_io(meter: Meter) -> Counter: """Network bytes for the Node""" return meter.create_counter( name=K8S_NODE_NETWORK_IO, description="Network bytes for the Node.", unit="By", ) K8S_NODE_POD_ALLOCATABLE: Final = "k8s.node.pod.allocatable" """ Amount of pods allocatable on the node Instrument: updowncounter Unit: {pod} """ def create_k8s_node_pod_allocatable(meter: Meter) -> UpDownCounter: """Amount of pods allocatable on the node""" return meter.create_up_down_counter( name=K8S_NODE_POD_ALLOCATABLE, description="Amount of pods allocatable on the node.", unit="{pod}", ) K8S_NODE_UPTIME: Final = "k8s.node.uptime" """ The time the Node has been running Instrument: gauge Unit: s Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. The actual accuracy would depend on the instrumentation and operating system. """ def create_k8s_node_uptime( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The time the Node has been running""" return meter.create_observable_gauge( name=K8S_NODE_UPTIME, callbacks=callbacks, description="The time the Node has been running.", unit="s", ) K8S_POD_CPU_TIME: Final = "k8s.pod.cpu.time" """ Total CPU time consumed Instrument: counter Unit: s Note: Total CPU time consumed by the specific Pod on all available CPU cores. """ def create_k8s_pod_cpu_time(meter: Meter) -> Counter: """Total CPU time consumed""" return meter.create_counter( name=K8S_POD_CPU_TIME, description="Total CPU time consumed.", unit="s", ) K8S_POD_CPU_USAGE: Final = "k8s.pod.cpu.usage" """ Pod's CPU usage, measured in cpus. 
Range from 0 to the number of allocatable CPUs Instrument: gauge Unit: {cpu} Note: CPU usage of the specific Pod on all available CPU cores, averaged over the sample window. """ def create_k8s_pod_cpu_usage( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs""" return meter.create_observable_gauge( name=K8S_POD_CPU_USAGE, callbacks=callbacks, description="Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs.", unit="{cpu}", ) K8S_POD_FILESYSTEM_AVAILABLE: Final = "k8s.pod.filesystem.available" """ Pod filesystem available bytes Instrument: updowncounter Unit: By Note: This metric is derived from the [FsStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field of the [PodStats.EphemeralStorage](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_filesystem_available(meter: Meter) -> UpDownCounter: """Pod filesystem available bytes""" return meter.create_up_down_counter( name=K8S_POD_FILESYSTEM_AVAILABLE, description="Pod filesystem available bytes.", unit="By", ) K8S_POD_FILESYSTEM_CAPACITY: Final = "k8s.pod.filesystem.capacity" """ Pod filesystem capacity Instrument: updowncounter Unit: By Note: This metric is derived from the [FsStats.CapacityBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field of the [PodStats.EphemeralStorage](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_filesystem_capacity(meter: Meter) -> UpDownCounter: """Pod filesystem capacity""" return meter.create_up_down_counter( name=K8S_POD_FILESYSTEM_CAPACITY, description="Pod filesystem capacity.", unit="By", ) K8S_POD_FILESYSTEM_USAGE: Final = "k8s.pod.filesystem.usage" """ Pod filesystem usage Instrument: updowncounter Unit: By Note: This may not equal capacity - available. This metric is derived from the [FsStats.UsedBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats) field of the [PodStats.EphemeralStorage](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_filesystem_usage(meter: Meter) -> UpDownCounter: """Pod filesystem usage""" return meter.create_up_down_counter( name=K8S_POD_FILESYSTEM_USAGE, description="Pod filesystem usage.", unit="By", ) K8S_POD_MEMORY_AVAILABLE: Final = "k8s.pod.memory.available" """ Pod memory available Instrument: updowncounter Unit: By Note: Available memory for use. This is defined as the memory limit - workingSetBytes. If the memory limit is undefined, the available bytes are omitted. This metric is derived from the [MemoryStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_memory_available(meter: Meter) -> UpDownCounter: """Pod memory available""" return meter.create_up_down_counter( name=K8S_POD_MEMORY_AVAILABLE, description="Pod memory available.", unit="By", ) K8S_POD_MEMORY_PAGING_FAULTS: Final = "k8s.pod.memory.paging.faults" """ Pod memory paging faults Instrument: counter Unit: {fault} Note: Cumulative number of major/minor page faults. 
This metric is derived from the [MemoryStats.PageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) and [MemoryStats.MajorPageFaults](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) fields of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_memory_paging_faults(meter: Meter) -> Counter: """Pod memory paging faults""" return meter.create_counter( name=K8S_POD_MEMORY_PAGING_FAULTS, description="Pod memory paging faults.", unit="{fault}", ) K8S_POD_MEMORY_RSS: Final = "k8s.pod.memory.rss" """ Pod memory RSS Instrument: updowncounter Unit: By Note: The amount of anonymous and swap cache memory (includes transparent hugepages). This metric is derived from the [MemoryStats.RSSBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_memory_rss(meter: Meter) -> UpDownCounter: """Pod memory RSS""" return meter.create_up_down_counter( name=K8S_POD_MEMORY_RSS, description="Pod memory RSS.", unit="By", ) K8S_POD_MEMORY_USAGE: Final = "k8s.pod.memory.usage" """ Memory usage of the Pod Instrument: gauge Unit: By Note: Total memory usage of the Pod. """ def create_k8s_pod_memory_usage( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Memory usage of the Pod""" return meter.create_observable_gauge( name=K8S_POD_MEMORY_USAGE, callbacks=callbacks, description="Memory usage of the Pod.", unit="By", ) K8S_POD_MEMORY_WORKING_SET: Final = "k8s.pod.memory.working_set" """ Pod memory working set Instrument: updowncounter Unit: By Note: The amount of working set memory. This includes recently accessed memory, dirty memory, and kernel memory. WorkingSetBytes is <= UsageBytes. This metric is derived from the [MemoryStats.WorkingSetBytes](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#MemoryStats) field of the [PodStats.Memory](https://pkg.go.dev/k8s.io/kubelet@v0.34.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_memory_working_set(meter: Meter) -> UpDownCounter: """Pod memory working set""" return meter.create_up_down_counter( name=K8S_POD_MEMORY_WORKING_SET, description="Pod memory working set.", unit="By", ) K8S_POD_NETWORK_ERRORS: Final = "k8s.pod.network.errors" """ Pod network errors Instrument: counter Unit: {error} """ def create_k8s_pod_network_errors(meter: Meter) -> Counter: """Pod network errors""" return meter.create_counter( name=K8S_POD_NETWORK_ERRORS, description="Pod network errors.", unit="{error}", ) K8S_POD_NETWORK_IO: Final = "k8s.pod.network.io" """ Network bytes for the Pod Instrument: counter Unit: By """ def create_k8s_pod_network_io(meter: Meter) -> Counter: """Network bytes for the Pod""" return meter.create_counter( name=K8S_POD_NETWORK_IO, description="Network bytes for the Pod.", unit="By", ) K8S_POD_STATUS_PHASE: Final = "k8s.pod.status.phase" """ Describes the number of K8s Pods that are currently in a given phase Instrument: updowncounter Unit: {pod} Note: All possible pod phases will be reported at each time interval to avoid missing metrics. Only the value corresponding to the current phase will be non-zero. 
""" def create_k8s_pod_status_phase(meter: Meter) -> UpDownCounter: """Describes number of K8s Pods that are currently in a given phase""" return meter.create_up_down_counter( name=K8S_POD_STATUS_PHASE, description="Describes number of K8s Pods that are currently in a given phase.", unit="{pod}", ) K8S_POD_STATUS_REASON: Final = "k8s.pod.status.reason" """ Describes the number of K8s Pods that are currently in a state for a given reason Instrument: updowncounter Unit: {pod} Note: All possible pod status reasons will be reported at each time interval to avoid missing metrics. Only the value corresponding to the current reason will be non-zero. """ def create_k8s_pod_status_reason(meter: Meter) -> UpDownCounter: """Describes the number of K8s Pods that are currently in a state for a given reason""" return meter.create_up_down_counter( name=K8S_POD_STATUS_REASON, description="Describes the number of K8s Pods that are currently in a state for a given reason.", unit="{pod}", ) K8S_POD_UPTIME: Final = "k8s.pod.uptime" """ The time the Pod has been running Instrument: gauge Unit: s Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. The actual accuracy would depend on the instrumentation and operating system. """ def create_k8s_pod_uptime( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The time the Pod has been running""" return meter.create_observable_gauge( name=K8S_POD_UPTIME, callbacks=callbacks, description="The time the Pod has been running.", unit="s", ) K8S_POD_VOLUME_AVAILABLE: Final = "k8s.pod.volume.available" """ Pod volume storage space available Instrument: updowncounter Unit: By Note: This metric is derived from the [VolumeStats.AvailableBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_volume_available(meter: Meter) -> UpDownCounter: """Pod volume storage space available""" return meter.create_up_down_counter( name=K8S_POD_VOLUME_AVAILABLE, description="Pod volume storage space available.", unit="By", ) K8S_POD_VOLUME_CAPACITY: Final = "k8s.pod.volume.capacity" """ Pod volume total capacity Instrument: updowncounter Unit: By Note: This metric is derived from the [VolumeStats.CapacityBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_volume_capacity(meter: Meter) -> UpDownCounter: """Pod volume total capacity""" return meter.create_up_down_counter( name=K8S_POD_VOLUME_CAPACITY, description="Pod volume total capacity.", unit="By", ) K8S_POD_VOLUME_INODE_COUNT: Final = "k8s.pod.volume.inode.count" """ The total inodes in the filesystem of the Pod's volume Instrument: updowncounter Unit: {inode} Note: This metric is derived from the [VolumeStats.Inodes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. 
""" def create_k8s_pod_volume_inode_count(meter: Meter) -> UpDownCounter: """The total inodes in the filesystem of the Pod's volume""" return meter.create_up_down_counter( name=K8S_POD_VOLUME_INODE_COUNT, description="The total inodes in the filesystem of the Pod's volume.", unit="{inode}", ) K8S_POD_VOLUME_INODE_FREE: Final = "k8s.pod.volume.inode.free" """ The free inodes in the filesystem of the Pod's volume Instrument: updowncounter Unit: {inode} Note: This metric is derived from the [VolumeStats.InodesFree](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_volume_inode_free(meter: Meter) -> UpDownCounter: """The free inodes in the filesystem of the Pod's volume""" return meter.create_up_down_counter( name=K8S_POD_VOLUME_INODE_FREE, description="The free inodes in the filesystem of the Pod's volume.", unit="{inode}", ) K8S_POD_VOLUME_INODE_USED: Final = "k8s.pod.volume.inode.used" """ The inodes used by the filesystem of the Pod's volume Instrument: updowncounter Unit: {inode} Note: This metric is derived from the [VolumeStats.InodesUsed](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. This may not be equal to `inodes - free` because filesystem may share inodes with other filesystems. """ def create_k8s_pod_volume_inode_used(meter: Meter) -> UpDownCounter: """The inodes used by the filesystem of the Pod's volume""" return meter.create_up_down_counter( name=K8S_POD_VOLUME_INODE_USED, description="The inodes used by the filesystem of the Pod's volume.", unit="{inode}", ) K8S_POD_VOLUME_USAGE: Final = "k8s.pod.volume.usage" """ Pod volume usage Instrument: updowncounter Unit: By Note: This may not equal capacity - available. This metric is derived from the [VolumeStats.UsedBytes](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats) field of the [PodStats](https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats) of the Kubelet's stats API. """ def create_k8s_pod_volume_usage(meter: Meter) -> UpDownCounter: """Pod volume usage""" return meter.create_up_down_counter( name=K8S_POD_VOLUME_USAGE, description="Pod volume usage.", unit="By", ) K8S_REPLICASET_AVAILABLE_PODS: Final = "k8s.replicaset.available_pods" """ Deprecated: Replaced by `k8s.replicaset.pod.available`. """ def create_k8s_replicaset_available_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.replicaset.pod.available` instead""" return meter.create_up_down_counter( name=K8S_REPLICASET_AVAILABLE_PODS, description="Deprecated, use `k8s.replicaset.pod.available` instead.", unit="{pod}", ) K8S_REPLICASET_DESIRED_PODS: Final = "k8s.replicaset.desired_pods" """ Deprecated: Replaced by `k8s.replicaset.pod.desired`. 
""" def create_k8s_replicaset_desired_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.replicaset.pod.desired` instead""" return meter.create_up_down_counter( name=K8S_REPLICASET_DESIRED_PODS, description="Deprecated, use `k8s.replicaset.pod.desired` instead.", unit="{pod}", ) K8S_REPLICASET_POD_AVAILABLE: Final = "k8s.replicaset.pod.available" """ Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `availableReplicas` field of the [K8s ReplicaSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps). """ def create_k8s_replicaset_pod_available(meter: Meter) -> UpDownCounter: """Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset""" return meter.create_up_down_counter( name=K8S_REPLICASET_POD_AVAILABLE, description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset.", unit="{pod}", ) K8S_REPLICASET_POD_DESIRED: Final = "k8s.replicaset.pod.desired" """ Number of desired replica pods in this replicaset Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `replicas` field of the [K8s ReplicaSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps). """ def create_k8s_replicaset_pod_desired(meter: Meter) -> UpDownCounter: """Number of desired replica pods in this replicaset""" return meter.create_up_down_counter( name=K8S_REPLICASET_POD_DESIRED, description="Number of desired replica pods in this replicaset.", unit="{pod}", ) K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS: Final = ( "k8s.replication_controller.available_pods" ) """ Deprecated: Replaced by `k8s.replicationcontroller.pod.available`. """ def create_k8s_replication_controller_available_pods( meter: Meter, ) -> UpDownCounter: """Deprecated, use `k8s.replicationcontroller.pod.available` instead""" return meter.create_up_down_counter( name=K8S_REPLICATION_CONTROLLER_AVAILABLE_PODS, description="Deprecated, use `k8s.replicationcontroller.pod.available` instead.", unit="{pod}", ) K8S_REPLICATION_CONTROLLER_DESIRED_PODS: Final = ( "k8s.replication_controller.desired_pods" ) """ Deprecated: Replaced by `k8s.replicationcontroller.pod.desired`. """ def create_k8s_replication_controller_desired_pods( meter: Meter, ) -> UpDownCounter: """Deprecated, use `k8s.replicationcontroller.pod.desired` instead""" return meter.create_up_down_counter( name=K8S_REPLICATION_CONTROLLER_DESIRED_PODS, description="Deprecated, use `k8s.replicationcontroller.pod.desired` instead.", unit="{pod}", ) K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS: Final = ( "k8s.replicationcontroller.available_pods" ) """ Deprecated: Replaced by `k8s.replicationcontroller.pod.available`. """ def create_k8s_replicationcontroller_available_pods( meter: Meter, ) -> UpDownCounter: """Deprecated, use `k8s.replicationcontroller.pod.available` instead""" return meter.create_up_down_counter( name=K8S_REPLICATIONCONTROLLER_AVAILABLE_PODS, description="Deprecated, use `k8s.replicationcontroller.pod.available` instead.", unit="{pod}", ) K8S_REPLICATIONCONTROLLER_DESIRED_PODS: Final = ( "k8s.replicationcontroller.desired_pods" ) """ Deprecated: Replaced by `k8s.replicationcontroller.pod.desired`. 
""" def create_k8s_replicationcontroller_desired_pods( meter: Meter, ) -> UpDownCounter: """Deprecated, use `k8s.replicationcontroller.pod.desired` instead""" return meter.create_up_down_counter( name=K8S_REPLICATIONCONTROLLER_DESIRED_PODS, description="Deprecated, use `k8s.replicationcontroller.pod.desired` instead.", unit="{pod}", ) K8S_REPLICATIONCONTROLLER_POD_AVAILABLE: Final = ( "k8s.replicationcontroller.pod.available" ) """ Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `availableReplicas` field of the [K8s ReplicationControllerStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core). """ def create_k8s_replicationcontroller_pod_available( meter: Meter, ) -> UpDownCounter: """Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller""" return meter.create_up_down_counter( name=K8S_REPLICATIONCONTROLLER_POD_AVAILABLE, description="Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller.", unit="{pod}", ) K8S_REPLICATIONCONTROLLER_POD_DESIRED: Final = ( "k8s.replicationcontroller.pod.desired" ) """ Number of desired replica pods in this replication controller Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `replicas` field of the [K8s ReplicationControllerSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core). """ def create_k8s_replicationcontroller_pod_desired( meter: Meter, ) -> UpDownCounter: """Number of desired replica pods in this replication controller""" return meter.create_up_down_counter( name=K8S_REPLICATIONCONTROLLER_POD_DESIRED, description="Number of desired replica pods in this replication controller.", unit="{pod}", ) K8S_RESOURCEQUOTA_CPU_LIMIT_HARD: Final = "k8s.resourcequota.cpu.limit.hard" """ The CPU limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace Instrument: updowncounter Unit: {cpu} Note: This metric is retrieved from the `hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_cpu_limit_hard(meter: Meter) -> UpDownCounter: """The CPU limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_CPU_LIMIT_HARD, description="The CPU limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace.", unit="{cpu}", ) K8S_RESOURCEQUOTA_CPU_LIMIT_USED: Final = "k8s.resourcequota.cpu.limit.used" """ The CPU limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace Instrument: updowncounter Unit: {cpu} Note: This metric is retrieved from the `used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_cpu_limit_used(meter: Meter) -> UpDownCounter: """The CPU limits in a specific namespace. 
The value represents the current observed total usage of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_CPU_LIMIT_USED, description="The CPU limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace.", unit="{cpu}", ) K8S_RESOURCEQUOTA_CPU_REQUEST_HARD: Final = ( "k8s.resourcequota.cpu.request.hard" ) """ The CPU requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace Instrument: updowncounter Unit: {cpu} Note: This metric is retrieved from the `hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_cpu_request_hard(meter: Meter) -> UpDownCounter: """The CPU requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_CPU_REQUEST_HARD, description="The CPU requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace.", unit="{cpu}", ) K8S_RESOURCEQUOTA_CPU_REQUEST_USED: Final = ( "k8s.resourcequota.cpu.request.used" ) """ The CPU requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace Instrument: updowncounter Unit: {cpu} Note: This metric is retrieved from the `used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_cpu_request_used(meter: Meter) -> UpDownCounter: """The CPU requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_CPU_REQUEST_USED, description="The CPU requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace.", unit="{cpu}", ) K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_LIMIT_HARD: Final = ( "k8s.resourcequota.ephemeral_storage.limit.hard" ) """ The sum of local ephemeral storage limits in the namespace. The value represents the configured quota limit of the resource in the namespace Instrument: updowncounter Unit: By Note: This metric is retrieved from the `hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_ephemeral_storage_limit_hard( meter: Meter, ) -> UpDownCounter: """The sum of local ephemeral storage limits in the namespace. The value represents the configured quota limit of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_LIMIT_HARD, description="The sum of local ephemeral storage limits in the namespace. The value represents the configured quota limit of the resource in the namespace.", unit="By", ) K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_LIMIT_USED: Final = ( "k8s.resourcequota.ephemeral_storage.limit.used" ) """ The sum of local ephemeral storage limits in the namespace. 
The value represents the current observed total usage of the resource in the namespace Instrument: updowncounter Unit: By Note: This metric is retrieved from the `used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_ephemeral_storage_limit_used( meter: Meter, ) -> UpDownCounter: """The sum of local ephemeral storage limits in the namespace. The value represents the current observed total usage of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_LIMIT_USED, description="The sum of local ephemeral storage limits in the namespace. The value represents the current observed total usage of the resource in the namespace.", unit="By", ) K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_REQUEST_HARD: Final = ( "k8s.resourcequota.ephemeral_storage.request.hard" ) """ The sum of local ephemeral storage requests in the namespace. The value represents the configured quota limit of the resource in the namespace Instrument: updowncounter Unit: By Note: This metric is retrieved from the `hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_ephemeral_storage_request_hard( meter: Meter, ) -> UpDownCounter: """The sum of local ephemeral storage requests in the namespace. The value represents the configured quota limit of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_REQUEST_HARD, description="The sum of local ephemeral storage requests in the namespace. The value represents the configured quota limit of the resource in the namespace.", unit="By", ) K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_REQUEST_USED: Final = ( "k8s.resourcequota.ephemeral_storage.request.used" ) """ The sum of local ephemeral storage requests in the namespace. The value represents the current observed total usage of the resource in the namespace Instrument: updowncounter Unit: By Note: This metric is retrieved from the `used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_ephemeral_storage_request_used( meter: Meter, ) -> UpDownCounter: """The sum of local ephemeral storage requests in the namespace. The value represents the current observed total usage of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_EPHEMERAL_STORAGE_REQUEST_USED, description="The sum of local ephemeral storage requests in the namespace. The value represents the current observed total usage of the resource in the namespace.", unit="By", ) K8S_RESOURCEQUOTA_HUGEPAGE_COUNT_REQUEST_HARD: Final = ( "k8s.resourcequota.hugepage_count.request.hard" ) """ The huge page requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace Instrument: updowncounter Unit: {hugepage} Note: This metric is retrieved from the `hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_hugepage_count_request_hard( meter: Meter, ) -> UpDownCounter: """The huge page requests in a specific namespace. 
The value represents the configured quota limit of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_HUGEPAGE_COUNT_REQUEST_HARD, description="The huge page requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace.", unit="{hugepage}", ) K8S_RESOURCEQUOTA_HUGEPAGE_COUNT_REQUEST_USED: Final = ( "k8s.resourcequota.hugepage_count.request.used" ) """ The huge page requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace Instrument: updowncounter Unit: {hugepage} Note: This metric is retrieved from the `used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_hugepage_count_request_used( meter: Meter, ) -> UpDownCounter: """The huge page requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_HUGEPAGE_COUNT_REQUEST_USED, description="The huge page requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace.", unit="{hugepage}", ) K8S_RESOURCEQUOTA_MEMORY_LIMIT_HARD: Final = ( "k8s.resourcequota.memory.limit.hard" ) """ The memory limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace Instrument: updowncounter Unit: By Note: This metric is retrieved from the `hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_memory_limit_hard(meter: Meter) -> UpDownCounter: """The memory limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_MEMORY_LIMIT_HARD, description="The memory limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace.", unit="By", ) K8S_RESOURCEQUOTA_MEMORY_LIMIT_USED: Final = ( "k8s.resourcequota.memory.limit.used" ) """ The memory limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace Instrument: updowncounter Unit: By Note: This metric is retrieved from the `used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_memory_limit_used(meter: Meter) -> UpDownCounter: """The memory limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_MEMORY_LIMIT_USED, description="The memory limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace.", unit="By", ) K8S_RESOURCEQUOTA_MEMORY_REQUEST_HARD: Final = ( "k8s.resourcequota.memory.request.hard" ) """ The memory requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace Instrument: updowncounter Unit: By Note: This metric is retrieved from the `hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). 
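
Usage sketch (illustrative; the `k8s.namespace.name` attribute key, the
meter name, and the byte values are assumptions for the example) — the
`hard` and `used` helpers are typically reported together for a namespace:

    from opentelemetry import metrics

    meter = metrics.get_meter("k8s.quota.example")
    hard = create_k8s_resourcequota_memory_request_hard(meter)
    used = create_k8s_resourcequota_memory_request_used(meter)
    # Report the configured quota limit and the observed usage in bytes.
    hard.add(8 * 1024**3, {"k8s.namespace.name": "team-a"})
    used.add(2 * 1024**3, {"k8s.namespace.name": "team-a"})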
""" def create_k8s_resourcequota_memory_request_hard( meter: Meter, ) -> UpDownCounter: """The memory requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_MEMORY_REQUEST_HARD, description="The memory requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace.", unit="By", ) K8S_RESOURCEQUOTA_MEMORY_REQUEST_USED: Final = ( "k8s.resourcequota.memory.request.used" ) """ The memory requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace Instrument: updowncounter Unit: By Note: This metric is retrieved from the `used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_memory_request_used( meter: Meter, ) -> UpDownCounter: """The memory requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_MEMORY_REQUEST_USED, description="The memory requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace.", unit="By", ) K8S_RESOURCEQUOTA_OBJECT_COUNT_HARD: Final = ( "k8s.resourcequota.object_count.hard" ) """ The object count limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace Instrument: updowncounter Unit: {object} Note: This metric is retrieved from the `hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_object_count_hard(meter: Meter) -> UpDownCounter: """The object count limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_OBJECT_COUNT_HARD, description="The object count limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace.", unit="{object}", ) K8S_RESOURCEQUOTA_OBJECT_COUNT_USED: Final = ( "k8s.resourcequota.object_count.used" ) """ The object count limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace Instrument: updowncounter Unit: {object} Note: This metric is retrieved from the `used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). """ def create_k8s_resourcequota_object_count_used(meter: Meter) -> UpDownCounter: """The object count limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_OBJECT_COUNT_USED, description="The object count limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace.", unit="{object}", ) K8S_RESOURCEQUOTA_PERSISTENTVOLUMECLAIM_COUNT_HARD: Final = ( "k8s.resourcequota.persistentvolumeclaim_count.hard" ) """ The total number of PersistentVolumeClaims that can exist in the namespace. 
The value represents the configured quota limit of the resource in the namespace Instrument: updowncounter Unit: {persistentvolumeclaim} Note: This metric is retrieved from the `hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. """ def create_k8s_resourcequota_persistentvolumeclaim_count_hard( meter: Meter, ) -> UpDownCounter: """The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the configured quota limit of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_PERSISTENTVOLUMECLAIM_COUNT_HARD, description="The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the configured quota limit of the resource in the namespace.", unit="{persistentvolumeclaim}", ) K8S_RESOURCEQUOTA_PERSISTENTVOLUMECLAIM_COUNT_USED: Final = ( "k8s.resourcequota.persistentvolumeclaim_count.used" ) """ The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the current observed total usage of the resource in the namespace Instrument: updowncounter Unit: {persistentvolumeclaim} Note: This metric is retrieved from the `used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. """ def create_k8s_resourcequota_persistentvolumeclaim_count_used( meter: Meter, ) -> UpDownCounter: """The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the current observed total usage of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_PERSISTENTVOLUMECLAIM_COUNT_USED, description="The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the current observed total usage of the resource in the namespace.", unit="{persistentvolumeclaim}", ) K8S_RESOURCEQUOTA_STORAGE_REQUEST_HARD: Final = ( "k8s.resourcequota.storage.request.hard" ) """ The storage requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace Instrument: updowncounter Unit: By Note: This metric is retrieved from the `hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. """ def create_k8s_resourcequota_storage_request_hard( meter: Meter, ) -> UpDownCounter: """The storage requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_STORAGE_REQUEST_HARD, description="The storage requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace.", unit="By", ) K8S_RESOURCEQUOTA_STORAGE_REQUEST_USED: Final = ( "k8s.resourcequota.storage.request.used" ) """ The storage requests in a specific namespace. 
The value represents the current observed total usage of the resource in the namespace Instrument: updowncounter Unit: By Note: This metric is retrieved from the `used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core). The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. """ def create_k8s_resourcequota_storage_request_used( meter: Meter, ) -> UpDownCounter: """The storage requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace""" return meter.create_up_down_counter( name=K8S_RESOURCEQUOTA_STORAGE_REQUEST_USED, description="The storage requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace.", unit="By", ) K8S_STATEFULSET_CURRENT_PODS: Final = "k8s.statefulset.current_pods" """ Deprecated: Replaced by `k8s.statefulset.pod.current`. """ def create_k8s_statefulset_current_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.statefulset.pod.current` instead""" return meter.create_up_down_counter( name=K8S_STATEFULSET_CURRENT_PODS, description="Deprecated, use `k8s.statefulset.pod.current` instead.", unit="{pod}", ) K8S_STATEFULSET_DESIRED_PODS: Final = "k8s.statefulset.desired_pods" """ Deprecated: Replaced by `k8s.statefulset.pod.desired`. """ def create_k8s_statefulset_desired_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.statefulset.pod.desired` instead""" return meter.create_up_down_counter( name=K8S_STATEFULSET_DESIRED_PODS, description="Deprecated, use `k8s.statefulset.pod.desired` instead.", unit="{pod}", ) K8S_STATEFULSET_POD_CURRENT: Final = "k8s.statefulset.pod.current" """ The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `currentReplicas` field of the [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). """ def create_k8s_statefulset_pod_current(meter: Meter) -> UpDownCounter: """The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision""" return meter.create_up_down_counter( name=K8S_STATEFULSET_POD_CURRENT, description="The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision.", unit="{pod}", ) K8S_STATEFULSET_POD_DESIRED: Final = "k8s.statefulset.pod.desired" """ Number of desired replica pods in this statefulset Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `replicas` field of the [K8s StatefulSetSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps). 
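
Usage sketch (illustrative; the meter name and replica count are
assumptions for the example):

    from opentelemetry import metrics

    meter = metrics.get_meter("k8s.example")
    # Mirror the `replicas` field of the StatefulSet spec.
    create_k8s_statefulset_pod_desired(meter).add(5)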
""" def create_k8s_statefulset_pod_desired(meter: Meter) -> UpDownCounter: """Number of desired replica pods in this statefulset""" return meter.create_up_down_counter( name=K8S_STATEFULSET_POD_DESIRED, description="Number of desired replica pods in this statefulset.", unit="{pod}", ) K8S_STATEFULSET_POD_READY: Final = "k8s.statefulset.pod.ready" """ The number of replica pods created for this statefulset with a Ready Condition Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `readyReplicas` field of the [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). """ def create_k8s_statefulset_pod_ready(meter: Meter) -> UpDownCounter: """The number of replica pods created for this statefulset with a Ready Condition""" return meter.create_up_down_counter( name=K8S_STATEFULSET_POD_READY, description="The number of replica pods created for this statefulset with a Ready Condition.", unit="{pod}", ) K8S_STATEFULSET_POD_UPDATED: Final = "k8s.statefulset.pod.updated" """ Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision Instrument: updowncounter Unit: {pod} Note: This metric aligns with the `updatedReplicas` field of the [K8s StatefulSetStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps). """ def create_k8s_statefulset_pod_updated(meter: Meter) -> UpDownCounter: """Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision""" return meter.create_up_down_counter( name=K8S_STATEFULSET_POD_UPDATED, description="Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision.", unit="{pod}", ) K8S_STATEFULSET_READY_PODS: Final = "k8s.statefulset.ready_pods" """ Deprecated: Replaced by `k8s.statefulset.pod.ready`. """ def create_k8s_statefulset_ready_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.statefulset.pod.ready` instead""" return meter.create_up_down_counter( name=K8S_STATEFULSET_READY_PODS, description="Deprecated, use `k8s.statefulset.pod.ready` instead.", unit="{pod}", ) K8S_STATEFULSET_UPDATED_PODS: Final = "k8s.statefulset.updated_pods" """ Deprecated: Replaced by `k8s.statefulset.pod.updated`. """ def create_k8s_statefulset_updated_pods(meter: Meter) -> UpDownCounter: """Deprecated, use `k8s.statefulset.pod.updated` instead""" return meter.create_up_down_counter( name=K8S_STATEFULSET_UPDATED_PODS, description="Deprecated, use `k8s.statefulset.pod.updated` instead.", unit="{pod}", ) messaging_metrics.py000066400000000000000000000141321511654350100421450ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Final from opentelemetry.metrics import Counter, Histogram, Meter MESSAGING_CLIENT_CONSUMED_MESSAGES: Final = ( "messaging.client.consumed.messages" ) """ Number of messages that were delivered to the application Instrument: counter Unit: {message} Note: Records the number of messages pulled from the broker or number of messages dispatched to the application in push-based scenarios. The metric SHOULD be reported once per message delivery. For example, if receiving and processing operations are both instrumented for a single message delivery, this counter is incremented when the message is received and not reported when it is processed. """ def create_messaging_client_consumed_messages(meter: Meter) -> Counter: """Number of messages that were delivered to the application""" return meter.create_counter( name=MESSAGING_CLIENT_CONSUMED_MESSAGES, description="Number of messages that were delivered to the application.", unit="{message}", ) MESSAGING_CLIENT_OPERATION_DURATION: Final = ( "messaging.client.operation.duration" ) """ Duration of messaging operation initiated by a producer or consumer client Instrument: histogram Unit: s Note: This metric SHOULD NOT be used to report processing duration - processing duration is reported in `messaging.process.duration` metric. """ def create_messaging_client_operation_duration(meter: Meter) -> Histogram: """Duration of messaging operation initiated by a producer or consumer client""" return meter.create_histogram( name=MESSAGING_CLIENT_OPERATION_DURATION, description="Duration of messaging operation initiated by a producer or consumer client.", unit="s", ) MESSAGING_CLIENT_PUBLISHED_MESSAGES: Final = ( "messaging.client.published.messages" ) """ Deprecated: Replaced by `messaging.client.sent.messages`. """ def create_messaging_client_published_messages(meter: Meter) -> Counter: """Deprecated. Use `messaging.client.sent.messages` instead""" return meter.create_counter( name=MESSAGING_CLIENT_PUBLISHED_MESSAGES, description="Deprecated. Use `messaging.client.sent.messages` instead.", unit="{message}", ) MESSAGING_CLIENT_SENT_MESSAGES: Final = "messaging.client.sent.messages" """ Number of messages producer attempted to send to the broker Instrument: counter Unit: {message} Note: This metric MUST NOT count messages that were created but haven't yet been sent. """ def create_messaging_client_sent_messages(meter: Meter) -> Counter: """Number of messages producer attempted to send to the broker""" return meter.create_counter( name=MESSAGING_CLIENT_SENT_MESSAGES, description="Number of messages producer attempted to send to the broker.", unit="{message}", ) MESSAGING_PROCESS_DURATION: Final = "messaging.process.duration" """ Duration of processing operation Instrument: histogram Unit: s Note: This metric MUST be reported for operations with `messaging.operation.type` that matches `process`. """ def create_messaging_process_duration(meter: Meter) -> Histogram: """Duration of processing operation""" return meter.create_histogram( name=MESSAGING_PROCESS_DURATION, description="Duration of processing operation.", unit="s", ) MESSAGING_PROCESS_MESSAGES: Final = "messaging.process.messages" """ Deprecated: Replaced by `messaging.client.consumed.messages`. """ def create_messaging_process_messages(meter: Meter) -> Counter: """Deprecated. Use `messaging.client.consumed.messages` instead""" return meter.create_counter( name=MESSAGING_PROCESS_MESSAGES, description="Deprecated. 
Use `messaging.client.consumed.messages` instead.", unit="{message}", ) MESSAGING_PUBLISH_DURATION: Final = "messaging.publish.duration" """ Deprecated: Replaced by `messaging.client.operation.duration`. """ def create_messaging_publish_duration(meter: Meter) -> Histogram: """Deprecated. Use `messaging.client.operation.duration` instead""" return meter.create_histogram( name=MESSAGING_PUBLISH_DURATION, description="Deprecated. Use `messaging.client.operation.duration` instead.", unit="s", ) MESSAGING_PUBLISH_MESSAGES: Final = "messaging.publish.messages" """ Deprecated: Replaced by `messaging.client.sent.messages`. """ def create_messaging_publish_messages(meter: Meter) -> Counter: """Deprecated. Use `messaging.client.sent.messages` instead""" return meter.create_counter( name=MESSAGING_PUBLISH_MESSAGES, description="Deprecated. Use `messaging.client.sent.messages` instead.", unit="{message}", ) MESSAGING_RECEIVE_DURATION: Final = "messaging.receive.duration" """ Deprecated: Replaced by `messaging.client.operation.duration`. """ def create_messaging_receive_duration(meter: Meter) -> Histogram: """Deprecated. Use `messaging.client.operation.duration` instead""" return meter.create_histogram( name=MESSAGING_RECEIVE_DURATION, description="Deprecated. Use `messaging.client.operation.duration` instead.", unit="s", ) MESSAGING_RECEIVE_MESSAGES: Final = "messaging.receive.messages" """ Deprecated: Replaced by `messaging.client.consumed.messages`. """ def create_messaging_receive_messages(meter: Meter) -> Counter: """Deprecated. Use `messaging.client.consumed.messages` instead""" return meter.create_counter( name=MESSAGING_RECEIVE_MESSAGES, description="Deprecated. Use `messaging.client.consumed.messages` instead.", unit="{message}", ) nfs_metrics.py000066400000000000000000000236321511654350100407630ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final from opentelemetry.metrics import Counter, Meter, UpDownCounter NFS_CLIENT_NET_COUNT: Final = "nfs.client.net.count" """ Reports the count of kernel NFS client TCP segments and UDP datagrams handled Instrument: counter Unit: {record} Note: Linux: this metric is taken from the Linux kernel's svc_stat.netudpcnt and svc_stat.nettcpcnt. """ def create_nfs_client_net_count(meter: Meter) -> Counter: """Reports the count of kernel NFS client TCP segments and UDP datagrams handled""" return meter.create_counter( name=NFS_CLIENT_NET_COUNT, description="Reports the count of kernel NFS client TCP segments and UDP datagrams handled.", unit="{record}", ) NFS_CLIENT_NET_TCP_CONNECTION_ACCEPTED: Final = ( "nfs.client.net.tcp.connection.accepted" ) """ Reports the count of kernel NFS client TCP connections accepted Instrument: counter Unit: {connection} Note: Linux: this metric is taken from the Linux kernel's svc_stat.nettcpconn. 
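
Usage sketch (illustrative; the meter name is an assumption for the
example):

    from opentelemetry import metrics

    meter = metrics.get_meter("nfs.example")
    accepted = create_nfs_client_net_tcp_connection_accepted(meter)
    # Count one newly accepted TCP connection.
    accepted.add(1)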
""" def create_nfs_client_net_tcp_connection_accepted(meter: Meter) -> Counter: """Reports the count of kernel NFS client TCP connections accepted""" return meter.create_counter( name=NFS_CLIENT_NET_TCP_CONNECTION_ACCEPTED, description="Reports the count of kernel NFS client TCP connections accepted.", unit="{connection}", ) NFS_CLIENT_OPERATION_COUNT: Final = "nfs.client.operation.count" """ Reports the count of kernel NFSv4+ client operations Instrument: counter Unit: {operation} """ def create_nfs_client_operation_count(meter: Meter) -> Counter: """Reports the count of kernel NFSv4+ client operations""" return meter.create_counter( name=NFS_CLIENT_OPERATION_COUNT, description="Reports the count of kernel NFSv4+ client operations.", unit="{operation}", ) NFS_CLIENT_PROCEDURE_COUNT: Final = "nfs.client.procedure.count" """ Reports the count of kernel NFS client procedures Instrument: counter Unit: {procedure} """ def create_nfs_client_procedure_count(meter: Meter) -> Counter: """Reports the count of kernel NFS client procedures""" return meter.create_counter( name=NFS_CLIENT_PROCEDURE_COUNT, description="Reports the count of kernel NFS client procedures.", unit="{procedure}", ) NFS_CLIENT_RPC_AUTHREFRESH_COUNT: Final = "nfs.client.rpc.authrefresh.count" """ Reports the count of kernel NFS client RPC authentication refreshes Instrument: counter Unit: {authrefresh} Note: Linux: this metric is taken from the Linux kernel's svc_stat.rpcauthrefresh. """ def create_nfs_client_rpc_authrefresh_count(meter: Meter) -> Counter: """Reports the count of kernel NFS client RPC authentication refreshes""" return meter.create_counter( name=NFS_CLIENT_RPC_AUTHREFRESH_COUNT, description="Reports the count of kernel NFS client RPC authentication refreshes.", unit="{authrefresh}", ) NFS_CLIENT_RPC_COUNT: Final = "nfs.client.rpc.count" """ Reports the count of kernel NFS client RPCs sent, regardless of whether they're accepted/rejected by the server Instrument: counter Unit: {request} Note: Linux: this metric is taken from the Linux kernel's svc_stat.rpccnt. """ def create_nfs_client_rpc_count(meter: Meter) -> Counter: """Reports the count of kernel NFS client RPCs sent, regardless of whether they're accepted/rejected by the server""" return meter.create_counter( name=NFS_CLIENT_RPC_COUNT, description="Reports the count of kernel NFS client RPCs sent, regardless of whether they're accepted/rejected by the server.", unit="{request}", ) NFS_CLIENT_RPC_RETRANSMIT_COUNT: Final = "nfs.client.rpc.retransmit.count" """ Reports the count of kernel NFS client RPC retransmits Instrument: counter Unit: {retransmit} Note: Linux: this metric is taken from the Linux kernel's svc_stat.rpcretrans. """ def create_nfs_client_rpc_retransmit_count(meter: Meter) -> Counter: """Reports the count of kernel NFS client RPC retransmits""" return meter.create_counter( name=NFS_CLIENT_RPC_RETRANSMIT_COUNT, description="Reports the count of kernel NFS client RPC retransmits.", unit="{retransmit}", ) NFS_SERVER_FH_STALE_COUNT: Final = "nfs.server.fh.stale.count" """ Reports the count of kernel NFS server stale file handles Instrument: counter Unit: {fh} Note: Linux: this metric is taken from the Linux kernel NFSD_STATS_FH_STALE counter in the nfsd_net struct. 
""" def create_nfs_server_fh_stale_count(meter: Meter) -> Counter: """Reports the count of kernel NFS server stale file handles""" return meter.create_counter( name=NFS_SERVER_FH_STALE_COUNT, description="Reports the count of kernel NFS server stale file handles.", unit="{fh}", ) NFS_SERVER_IO: Final = "nfs.server.io" """ Reports the count of kernel NFS server bytes returned to receive and transmit (read and write) requests Instrument: counter Unit: By Note: Linux: this metric is taken from the Linux kernel NFSD_STATS_IO_READ and NFSD_STATS_IO_WRITE counters in the nfsd_net struct. """ def create_nfs_server_io(meter: Meter) -> Counter: """Reports the count of kernel NFS server bytes returned to receive and transmit (read and write) requests""" return meter.create_counter( name=NFS_SERVER_IO, description="Reports the count of kernel NFS server bytes returned to receive and transmit (read and write) requests.", unit="By", ) NFS_SERVER_NET_COUNT: Final = "nfs.server.net.count" """ Reports the count of kernel NFS server TCP segments and UDP datagrams handled Instrument: counter Unit: {record} Note: Linux: this metric is taken from the Linux kernel's svc_stat.nettcpcnt and svc_stat.netudpcnt. """ def create_nfs_server_net_count(meter: Meter) -> Counter: """Reports the count of kernel NFS server TCP segments and UDP datagrams handled""" return meter.create_counter( name=NFS_SERVER_NET_COUNT, description="Reports the count of kernel NFS server TCP segments and UDP datagrams handled.", unit="{record}", ) NFS_SERVER_NET_TCP_CONNECTION_ACCEPTED: Final = ( "nfs.server.net.tcp.connection.accepted" ) """ Reports the count of kernel NFS server TCP connections accepted Instrument: counter Unit: {connection} Note: Linux: this metric is taken from the Linux kernel's svc_stat.nettcpconn. 
""" def create_nfs_server_net_tcp_connection_accepted(meter: Meter) -> Counter: """Reports the count of kernel NFS server TCP connections accepted""" return meter.create_counter( name=NFS_SERVER_NET_TCP_CONNECTION_ACCEPTED, description="Reports the count of kernel NFS server TCP connections accepted.", unit="{connection}", ) NFS_SERVER_OPERATION_COUNT: Final = "nfs.server.operation.count" """ Reports the count of kernel NFSv4+ server operations Instrument: counter Unit: {operation} """ def create_nfs_server_operation_count(meter: Meter) -> Counter: """Reports the count of kernel NFSv4+ server operations""" return meter.create_counter( name=NFS_SERVER_OPERATION_COUNT, description="Reports the count of kernel NFSv4+ server operations.", unit="{operation}", ) NFS_SERVER_PROCEDURE_COUNT: Final = "nfs.server.procedure.count" """ Reports the count of kernel NFS server procedures Instrument: counter Unit: {procedure} """ def create_nfs_server_procedure_count(meter: Meter) -> Counter: """Reports the count of kernel NFS server procedures""" return meter.create_counter( name=NFS_SERVER_PROCEDURE_COUNT, description="Reports the count of kernel NFS server procedures.", unit="{procedure}", ) NFS_SERVER_REPCACHE_REQUESTS: Final = "nfs.server.repcache.requests" """ Reports the kernel NFS server reply cache request count by cache hit status Instrument: counter Unit: {request} """ def create_nfs_server_repcache_requests(meter: Meter) -> Counter: """Reports the kernel NFS server reply cache request count by cache hit status""" return meter.create_counter( name=NFS_SERVER_REPCACHE_REQUESTS, description="Reports the kernel NFS server reply cache request count by cache hit status.", unit="{request}", ) NFS_SERVER_RPC_COUNT: Final = "nfs.server.rpc.count" """ Reports the count of kernel NFS server RPCs handled Instrument: counter Unit: {request} Note: Linux: this metric is taken from the Linux kernel's svc_stat.rpccnt, the count of good RPCs. This metric can have an error.type of "format", "auth", or "client" for svc_stat.badfmt, svc_stat.badauth, and svc_stat.badclnt. """ def create_nfs_server_rpc_count(meter: Meter) -> Counter: """Reports the count of kernel NFS server RPCs handled""" return meter.create_counter( name=NFS_SERVER_RPC_COUNT, description="Reports the count of kernel NFS server RPCs handled.", unit="{request}", ) NFS_SERVER_THREAD_COUNT: Final = "nfs.server.thread.count" """ Reports the count of kernel NFS server available threads Instrument: updowncounter Unit: {thread} Note: Linux: this metric is taken from the Linux kernel nfsd_th_cnt variable. """ def create_nfs_server_thread_count(meter: Meter) -> UpDownCounter: """Reports the count of kernel NFS server available threads""" return meter.create_up_down_counter( name=NFS_SERVER_THREAD_COUNT, description="Reports the count of kernel NFS server available threads.", unit="{thread}", ) openshift_metrics.py000066400000000000000000000513241511654350100421730ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final from opentelemetry.metrics import Meter, UpDownCounter OPENSHIFT_CLUSTERQUOTA_CPU_LIMIT_HARD: Final = ( "openshift.clusterquota.cpu.limit.hard" ) """ The enforced hard limit of the resource across all projects Instrument: updowncounter Unit: {cpu} Note: This metric is retrieved from the `Status.Total.Hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_cpu_limit_hard( meter: Meter, ) -> UpDownCounter: """The enforced hard limit of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_CPU_LIMIT_HARD, description="The enforced hard limit of the resource across all projects.", unit="{cpu}", ) OPENSHIFT_CLUSTERQUOTA_CPU_LIMIT_USED: Final = ( "openshift.clusterquota.cpu.limit.used" ) """ The current observed total usage of the resource across all projects Instrument: updowncounter Unit: {cpu} Note: This metric is retrieved from the `Status.Total.Used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_cpu_limit_used( meter: Meter, ) -> UpDownCounter: """The current observed total usage of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_CPU_LIMIT_USED, description="The current observed total usage of the resource across all projects.", unit="{cpu}", ) OPENSHIFT_CLUSTERQUOTA_CPU_REQUEST_HARD: Final = ( "openshift.clusterquota.cpu.request.hard" ) """ The enforced hard limit of the resource across all projects Instrument: updowncounter Unit: {cpu} Note: This metric is retrieved from the `Status.Total.Hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
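
Usage sketch (illustrative; the meter name and the quota value, expressed
in `{cpu}` units, are assumptions for the example):

    from opentelemetry import metrics

    meter = metrics.get_meter("openshift.example")
    hard = create_openshift_clusterquota_cpu_request_hard(meter)
    # Report the enforced hard CPU-request limit across all projects.
    hard.add(16)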
""" def create_openshift_clusterquota_cpu_request_hard( meter: Meter, ) -> UpDownCounter: """The enforced hard limit of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_CPU_REQUEST_HARD, description="The enforced hard limit of the resource across all projects.", unit="{cpu}", ) OPENSHIFT_CLUSTERQUOTA_CPU_REQUEST_USED: Final = ( "openshift.clusterquota.cpu.request.used" ) """ The current observed total usage of the resource across all projects Instrument: updowncounter Unit: {cpu} Note: This metric is retrieved from the `Status.Total.Used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_cpu_request_used( meter: Meter, ) -> UpDownCounter: """The current observed total usage of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_CPU_REQUEST_USED, description="The current observed total usage of the resource across all projects.", unit="{cpu}", ) OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_LIMIT_HARD: Final = ( "openshift.clusterquota.ephemeral_storage.limit.hard" ) """ The enforced hard limit of the resource across all projects Instrument: updowncounter Unit: By Note: This metric is retrieved from the `Status.Total.Hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_ephemeral_storage_limit_hard( meter: Meter, ) -> UpDownCounter: """The enforced hard limit of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_LIMIT_HARD, description="The enforced hard limit of the resource across all projects.", unit="By", ) OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_LIMIT_USED: Final = ( "openshift.clusterquota.ephemeral_storage.limit.used" ) """ The current observed total usage of the resource across all projects Instrument: updowncounter Unit: By Note: This metric is retrieved from the `Status.Total.Used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
""" def create_openshift_clusterquota_ephemeral_storage_limit_used( meter: Meter, ) -> UpDownCounter: """The current observed total usage of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_LIMIT_USED, description="The current observed total usage of the resource across all projects.", unit="By", ) OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_REQUEST_HARD: Final = ( "openshift.clusterquota.ephemeral_storage.request.hard" ) """ The enforced hard limit of the resource across all projects Instrument: updowncounter Unit: By Note: This metric is retrieved from the `Status.Total.Hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_ephemeral_storage_request_hard( meter: Meter, ) -> UpDownCounter: """The enforced hard limit of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_REQUEST_HARD, description="The enforced hard limit of the resource across all projects.", unit="By", ) OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_REQUEST_USED: Final = ( "openshift.clusterquota.ephemeral_storage.request.used" ) """ The current observed total usage of the resource across all projects Instrument: updowncounter Unit: By Note: This metric is retrieved from the `Status.Total.Used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_ephemeral_storage_request_used( meter: Meter, ) -> UpDownCounter: """The current observed total usage of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_EPHEMERAL_STORAGE_REQUEST_USED, description="The current observed total usage of the resource across all projects.", unit="By", ) OPENSHIFT_CLUSTERQUOTA_HUGEPAGE_COUNT_REQUEST_HARD: Final = ( "openshift.clusterquota.hugepage_count.request.hard" ) """ The enforced hard limit of the resource across all projects Instrument: updowncounter Unit: {hugepage} Note: This metric is retrieved from the `Status.Total.Hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
""" def create_openshift_clusterquota_hugepage_count_request_hard( meter: Meter, ) -> UpDownCounter: """The enforced hard limit of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_HUGEPAGE_COUNT_REQUEST_HARD, description="The enforced hard limit of the resource across all projects.", unit="{hugepage}", ) OPENSHIFT_CLUSTERQUOTA_HUGEPAGE_COUNT_REQUEST_USED: Final = ( "openshift.clusterquota.hugepage_count.request.used" ) """ The current observed total usage of the resource across all projects Instrument: updowncounter Unit: {hugepage} Note: This metric is retrieved from the `Status.Total.Used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_hugepage_count_request_used( meter: Meter, ) -> UpDownCounter: """The current observed total usage of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_HUGEPAGE_COUNT_REQUEST_USED, description="The current observed total usage of the resource across all projects.", unit="{hugepage}", ) OPENSHIFT_CLUSTERQUOTA_MEMORY_LIMIT_HARD: Final = ( "openshift.clusterquota.memory.limit.hard" ) """ The enforced hard limit of the resource across all projects Instrument: updowncounter Unit: By Note: This metric is retrieved from the `Status.Total.Hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_memory_limit_hard( meter: Meter, ) -> UpDownCounter: """The enforced hard limit of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_MEMORY_LIMIT_HARD, description="The enforced hard limit of the resource across all projects.", unit="By", ) OPENSHIFT_CLUSTERQUOTA_MEMORY_LIMIT_USED: Final = ( "openshift.clusterquota.memory.limit.used" ) """ The current observed total usage of the resource across all projects Instrument: updowncounter Unit: By Note: This metric is retrieved from the `Status.Total.Used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
""" def create_openshift_clusterquota_memory_limit_used( meter: Meter, ) -> UpDownCounter: """The current observed total usage of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_MEMORY_LIMIT_USED, description="The current observed total usage of the resource across all projects.", unit="By", ) OPENSHIFT_CLUSTERQUOTA_MEMORY_REQUEST_HARD: Final = ( "openshift.clusterquota.memory.request.hard" ) """ The enforced hard limit of the resource across all projects Instrument: updowncounter Unit: By Note: This metric is retrieved from the `Status.Total.Hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_memory_request_hard( meter: Meter, ) -> UpDownCounter: """The enforced hard limit of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_MEMORY_REQUEST_HARD, description="The enforced hard limit of the resource across all projects.", unit="By", ) OPENSHIFT_CLUSTERQUOTA_MEMORY_REQUEST_USED: Final = ( "openshift.clusterquota.memory.request.used" ) """ The current observed total usage of the resource across all projects Instrument: updowncounter Unit: By Note: This metric is retrieved from the `Status.Total.Used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_memory_request_used( meter: Meter, ) -> UpDownCounter: """The current observed total usage of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_MEMORY_REQUEST_USED, description="The current observed total usage of the resource across all projects.", unit="By", ) OPENSHIFT_CLUSTERQUOTA_OBJECT_COUNT_HARD: Final = ( "openshift.clusterquota.object_count.hard" ) """ The enforced hard limit of the resource across all projects Instrument: updowncounter Unit: {object} Note: This metric is retrieved from the `Status.Total.Hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). 
""" def create_openshift_clusterquota_object_count_hard( meter: Meter, ) -> UpDownCounter: """The enforced hard limit of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_OBJECT_COUNT_HARD, description="The enforced hard limit of the resource across all projects.", unit="{object}", ) OPENSHIFT_CLUSTERQUOTA_OBJECT_COUNT_USED: Final = ( "openshift.clusterquota.object_count.used" ) """ The current observed total usage of the resource across all projects Instrument: updowncounter Unit: {object} Note: This metric is retrieved from the `Status.Total.Used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). """ def create_openshift_clusterquota_object_count_used( meter: Meter, ) -> UpDownCounter: """The current observed total usage of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_OBJECT_COUNT_USED, description="The current observed total usage of the resource across all projects.", unit="{object}", ) OPENSHIFT_CLUSTERQUOTA_PERSISTENTVOLUMECLAIM_COUNT_HARD: Final = ( "openshift.clusterquota.persistentvolumeclaim_count.hard" ) """ The enforced hard limit of the resource across all projects Instrument: updowncounter Unit: {persistentvolumeclaim} Note: This metric is retrieved from the `Status.Total.Hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. """ def create_openshift_clusterquota_persistentvolumeclaim_count_hard( meter: Meter, ) -> UpDownCounter: """The enforced hard limit of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_PERSISTENTVOLUMECLAIM_COUNT_HARD, description="The enforced hard limit of the resource across all projects.", unit="{persistentvolumeclaim}", ) OPENSHIFT_CLUSTERQUOTA_PERSISTENTVOLUMECLAIM_COUNT_USED: Final = ( "openshift.clusterquota.persistentvolumeclaim_count.used" ) """ The current observed total usage of the resource across all projects Instrument: updowncounter Unit: {persistentvolumeclaim} Note: This metric is retrieved from the `Status.Total.Used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. 
""" def create_openshift_clusterquota_persistentvolumeclaim_count_used( meter: Meter, ) -> UpDownCounter: """The current observed total usage of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_PERSISTENTVOLUMECLAIM_COUNT_USED, description="The current observed total usage of the resource across all projects.", unit="{persistentvolumeclaim}", ) OPENSHIFT_CLUSTERQUOTA_STORAGE_REQUEST_HARD: Final = ( "openshift.clusterquota.storage.request.hard" ) """ The enforced hard limit of the resource across all projects Instrument: updowncounter Unit: By Note: This metric is retrieved from the `Status.Total.Hard` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. """ def create_openshift_clusterquota_storage_request_hard( meter: Meter, ) -> UpDownCounter: """The enforced hard limit of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_STORAGE_REQUEST_HARD, description="The enforced hard limit of the resource across all projects.", unit="By", ) OPENSHIFT_CLUSTERQUOTA_STORAGE_REQUEST_USED: Final = ( "openshift.clusterquota.storage.request.used" ) """ The current observed total usage of the resource across all projects Instrument: updowncounter Unit: By Note: This metric is retrieved from the `Status.Total.Used` field of the [K8s ResourceQuotaStatus](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core) of the [ClusterResourceQuota](https://docs.redhat.com/en/documentation/openshift_container_platform/4.19/html/schedule_and_quota_apis/clusterresourcequota-quota-openshift-io-v1#status-total). The `k8s.storageclass.name` should be required when a resource quota is defined for a specific storage class. """ def create_openshift_clusterquota_storage_request_used( meter: Meter, ) -> UpDownCounter: """The current observed total usage of the resource across all projects""" return meter.create_up_down_counter( name=OPENSHIFT_CLUSTERQUOTA_STORAGE_REQUEST_USED, description="The current observed total usage of the resource across all projects.", unit="By", ) otel_metrics.py000066400000000000000000000423301511654350100411340ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Final from opentelemetry.metrics import Counter, Histogram, Meter, UpDownCounter OTEL_SDK_EXPORTER_LOG_EXPORTED: Final = "otel.sdk.exporter.log.exported" """ The number of log records for which the export has finished, either successful or failed Instrument: counter Unit: {log_record} Note: For successful exports, `error.type` MUST NOT be set. For failed exports, `error.type` MUST contain the failure cause. For exporters with partial success semantics (e.g. OTLP with `rejected_log_records`), rejected log records MUST count as failed and only non-rejected log records count as success. If no rejection reason is available, `rejected` SHOULD be used as value for `error.type`. """ def create_otel_sdk_exporter_log_exported(meter: Meter) -> Counter: """The number of log records for which the export has finished, either successful or failed""" return meter.create_counter( name=OTEL_SDK_EXPORTER_LOG_EXPORTED, description="The number of log records for which the export has finished, either successful or failed.", unit="{log_record}", ) OTEL_SDK_EXPORTER_LOG_INFLIGHT: Final = "otel.sdk.exporter.log.inflight" """ The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed) Instrument: updowncounter Unit: {log_record} Note: For successful exports, `error.type` MUST NOT be set. For failed exports, `error.type` MUST contain the failure cause. """ def create_otel_sdk_exporter_log_inflight(meter: Meter) -> UpDownCounter: """The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)""" return meter.create_up_down_counter( name=OTEL_SDK_EXPORTER_LOG_INFLIGHT, description="The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed).", unit="{log_record}", ) OTEL_SDK_EXPORTER_METRIC_DATA_POINT_EXPORTED: Final = ( "otel.sdk.exporter.metric_data_point.exported" ) """ The number of metric data points for which the export has finished, either successful or failed Instrument: counter Unit: {data_point} Note: For successful exports, `error.type` MUST NOT be set. For failed exports, `error.type` MUST contain the failure cause. For exporters with partial success semantics (e.g. OTLP with `rejected_data_points`), rejected data points MUST count as failed and only non-rejected data points count as success. If no rejection reason is available, `rejected` SHOULD be used as value for `error.type`. """ def create_otel_sdk_exporter_metric_data_point_exported( meter: Meter, ) -> Counter: """The number of metric data points for which the export has finished, either successful or failed""" return meter.create_counter( name=OTEL_SDK_EXPORTER_METRIC_DATA_POINT_EXPORTED, description="The number of metric data points for which the export has finished, either successful or failed.", unit="{data_point}", ) OTEL_SDK_EXPORTER_METRIC_DATA_POINT_INFLIGHT: Final = ( "otel.sdk.exporter.metric_data_point.inflight" ) """ The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed) Instrument: updowncounter Unit: {data_point} Note: For successful exports, `error.type` MUST NOT be set. For failed exports, `error.type` MUST contain the failure cause. 
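A minimal usage sketch (illustrative only; the batch size is made up and the
export call itself is elided):

    from opentelemetry import metrics

    meter = metrics.get_meter("otel.sdk.self_monitoring")  # hypothetical scope
    inflight = create_otel_sdk_exporter_metric_data_point_inflight(meter)
    batch_size = 200
    inflight.add(batch_size)   # data points handed to the exporter
    # ... export runs ...
    inflight.add(-batch_size)  # export finished, successfully or not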
""" def create_otel_sdk_exporter_metric_data_point_inflight( meter: Meter, ) -> UpDownCounter: """The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)""" return meter.create_up_down_counter( name=OTEL_SDK_EXPORTER_METRIC_DATA_POINT_INFLIGHT, description="The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed).", unit="{data_point}", ) OTEL_SDK_EXPORTER_OPERATION_DURATION: Final = ( "otel.sdk.exporter.operation.duration" ) """ The duration of exporting a batch of telemetry records Instrument: histogram Unit: s Note: This metric defines successful operations using the full success definitions for [http](https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1) and [grpc](https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success). Anything else is defined as an unsuccessful operation. For successful operations, `error.type` MUST NOT be set. For unsuccessful export operations, `error.type` MUST contain a relevant failure cause. """ def create_otel_sdk_exporter_operation_duration(meter: Meter) -> Histogram: """The duration of exporting a batch of telemetry records""" return meter.create_histogram( name=OTEL_SDK_EXPORTER_OPERATION_DURATION, description="The duration of exporting a batch of telemetry records.", unit="s", ) OTEL_SDK_EXPORTER_SPAN_EXPORTED: Final = "otel.sdk.exporter.span.exported" """ The number of spans for which the export has finished, either successful or failed Instrument: counter Unit: {span} Note: For successful exports, `error.type` MUST NOT be set. For failed exports, `error.type` MUST contain the failure cause. For exporters with partial success semantics (e.g. OTLP with `rejected_spans`), rejected spans MUST count as failed and only non-rejected spans count as success. If no rejection reason is available, `rejected` SHOULD be used as value for `error.type`. """ def create_otel_sdk_exporter_span_exported(meter: Meter) -> Counter: """The number of spans for which the export has finished, either successful or failed""" return meter.create_counter( name=OTEL_SDK_EXPORTER_SPAN_EXPORTED, description="The number of spans for which the export has finished, either successful or failed.", unit="{span}", ) OTEL_SDK_EXPORTER_SPAN_EXPORTED_COUNT: Final = ( "otel.sdk.exporter.span.exported.count" ) """ Deprecated: Replaced by `otel.sdk.exporter.span.exported`. """ def create_otel_sdk_exporter_span_exported_count( meter: Meter, ) -> UpDownCounter: """Deprecated, use `otel.sdk.exporter.span.exported` instead""" return meter.create_up_down_counter( name=OTEL_SDK_EXPORTER_SPAN_EXPORTED_COUNT, description="Deprecated, use `otel.sdk.exporter.span.exported` instead.", unit="{span}", ) OTEL_SDK_EXPORTER_SPAN_INFLIGHT: Final = "otel.sdk.exporter.span.inflight" """ The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed) Instrument: updowncounter Unit: {span} Note: For successful exports, `error.type` MUST NOT be set. For failed exports, `error.type` MUST contain the failure cause. 
""" def create_otel_sdk_exporter_span_inflight(meter: Meter) -> UpDownCounter: """The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)""" return meter.create_up_down_counter( name=OTEL_SDK_EXPORTER_SPAN_INFLIGHT, description="The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed).", unit="{span}", ) OTEL_SDK_EXPORTER_SPAN_INFLIGHT_COUNT: Final = ( "otel.sdk.exporter.span.inflight.count" ) """ Deprecated: Replaced by `otel.sdk.exporter.span.inflight`. """ def create_otel_sdk_exporter_span_inflight_count( meter: Meter, ) -> UpDownCounter: """Deprecated, use `otel.sdk.exporter.span.inflight` instead""" return meter.create_up_down_counter( name=OTEL_SDK_EXPORTER_SPAN_INFLIGHT_COUNT, description="Deprecated, use `otel.sdk.exporter.span.inflight` instead.", unit="{span}", ) OTEL_SDK_LOG_CREATED: Final = "otel.sdk.log.created" """ The number of logs submitted to enabled SDK Loggers Instrument: counter Unit: {log_record} """ def create_otel_sdk_log_created(meter: Meter) -> Counter: """The number of logs submitted to enabled SDK Loggers""" return meter.create_counter( name=OTEL_SDK_LOG_CREATED, description="The number of logs submitted to enabled SDK Loggers.", unit="{log_record}", ) OTEL_SDK_METRIC_READER_COLLECTION_DURATION: Final = ( "otel.sdk.metric_reader.collection.duration" ) """ The duration of the collect operation of the metric reader Instrument: histogram Unit: s Note: For successful collections, `error.type` MUST NOT be set. For failed collections, `error.type` SHOULD contain the failure cause. It can happen that metrics collection is successful for some MetricProducers, while others fail. In that case `error.type` SHOULD be set to any of the failure causes. """ def create_otel_sdk_metric_reader_collection_duration( meter: Meter, ) -> Histogram: """The duration of the collect operation of the metric reader""" return meter.create_histogram( name=OTEL_SDK_METRIC_READER_COLLECTION_DURATION, description="The duration of the collect operation of the metric reader.", unit="s", ) OTEL_SDK_PROCESSOR_LOG_PROCESSED: Final = "otel.sdk.processor.log.processed" """ The number of log records for which the processing has finished, either successful or failed Instrument: counter Unit: {log_record} Note: For successful processing, `error.type` MUST NOT be set. For failed processing, `error.type` MUST contain the failure cause. For the SDK Simple and Batching Log Record Processor a log record is considered to be processed already when it has been submitted to the exporter, not when the corresponding export call has finished. """ def create_otel_sdk_processor_log_processed(meter: Meter) -> Counter: """The number of log records for which the processing has finished, either successful or failed""" return meter.create_counter( name=OTEL_SDK_PROCESSOR_LOG_PROCESSED, description="The number of log records for which the processing has finished, either successful or failed.", unit="{log_record}", ) OTEL_SDK_PROCESSOR_LOG_QUEUE_CAPACITY: Final = ( "otel.sdk.processor.log.queue.capacity" ) """ The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold Instrument: updowncounter Unit: {log_record} Note: Only applies to Log Record processors which use a queue, e.g. the SDK Batching Log Record Processor. 
""" def create_otel_sdk_processor_log_queue_capacity( meter: Meter, ) -> UpDownCounter: """The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold""" return meter.create_up_down_counter( name=OTEL_SDK_PROCESSOR_LOG_QUEUE_CAPACITY, description="The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold.", unit="{log_record}", ) OTEL_SDK_PROCESSOR_LOG_QUEUE_SIZE: Final = "otel.sdk.processor.log.queue.size" """ The number of log records in the queue of a given instance of an SDK log processor Instrument: updowncounter Unit: {log_record} Note: Only applies to log record processors which use a queue, e.g. the SDK Batching Log Record Processor. """ def create_otel_sdk_processor_log_queue_size(meter: Meter) -> UpDownCounter: """The number of log records in the queue of a given instance of an SDK log processor""" return meter.create_up_down_counter( name=OTEL_SDK_PROCESSOR_LOG_QUEUE_SIZE, description="The number of log records in the queue of a given instance of an SDK log processor.", unit="{log_record}", ) OTEL_SDK_PROCESSOR_SPAN_PROCESSED: Final = "otel.sdk.processor.span.processed" """ The number of spans for which the processing has finished, either successful or failed Instrument: counter Unit: {span} Note: For successful processing, `error.type` MUST NOT be set. For failed processing, `error.type` MUST contain the failure cause. For the SDK Simple and Batching Span Processor a span is considered to be processed already when it has been submitted to the exporter, not when the corresponding export call has finished. """ def create_otel_sdk_processor_span_processed(meter: Meter) -> Counter: """The number of spans for which the processing has finished, either successful or failed""" return meter.create_counter( name=OTEL_SDK_PROCESSOR_SPAN_PROCESSED, description="The number of spans for which the processing has finished, either successful or failed.", unit="{span}", ) OTEL_SDK_PROCESSOR_SPAN_PROCESSED_COUNT: Final = ( "otel.sdk.processor.span.processed.count" ) """ Deprecated: Replaced by `otel.sdk.processor.span.processed`. """ def create_otel_sdk_processor_span_processed_count( meter: Meter, ) -> UpDownCounter: """Deprecated, use `otel.sdk.processor.span.processed` instead""" return meter.create_up_down_counter( name=OTEL_SDK_PROCESSOR_SPAN_PROCESSED_COUNT, description="Deprecated, use `otel.sdk.processor.span.processed` instead.", unit="{span}", ) OTEL_SDK_PROCESSOR_SPAN_QUEUE_CAPACITY: Final = ( "otel.sdk.processor.span.queue.capacity" ) """ The maximum number of spans the queue of a given instance of an SDK span processor can hold Instrument: updowncounter Unit: {span} Note: Only applies to span processors which use a queue, e.g. the SDK Batching Span Processor. """ def create_otel_sdk_processor_span_queue_capacity( meter: Meter, ) -> UpDownCounter: """The maximum number of spans the queue of a given instance of an SDK span processor can hold""" return meter.create_up_down_counter( name=OTEL_SDK_PROCESSOR_SPAN_QUEUE_CAPACITY, description="The maximum number of spans the queue of a given instance of an SDK span processor can hold.", unit="{span}", ) OTEL_SDK_PROCESSOR_SPAN_QUEUE_SIZE: Final = ( "otel.sdk.processor.span.queue.size" ) """ The number of spans in the queue of a given instance of an SDK span processor Instrument: updowncounter Unit: {span} Note: Only applies to span processors which use a queue, e.g. the SDK Batching Span Processor. 
""" def create_otel_sdk_processor_span_queue_size(meter: Meter) -> UpDownCounter: """The number of spans in the queue of a given instance of an SDK span processor""" return meter.create_up_down_counter( name=OTEL_SDK_PROCESSOR_SPAN_QUEUE_SIZE, description="The number of spans in the queue of a given instance of an SDK span processor.", unit="{span}", ) OTEL_SDK_SPAN_ENDED: Final = "otel.sdk.span.ended" """ Deprecated: Obsoleted. """ def create_otel_sdk_span_ended(meter: Meter) -> Counter: """Use `otel.sdk.span.started` minus `otel.sdk.span.live` to derive this value""" return meter.create_counter( name=OTEL_SDK_SPAN_ENDED, description="Use `otel.sdk.span.started` minus `otel.sdk.span.live` to derive this value.", unit="{span}", ) OTEL_SDK_SPAN_ENDED_COUNT: Final = "otel.sdk.span.ended.count" """ Deprecated: Obsoleted. """ def create_otel_sdk_span_ended_count(meter: Meter) -> Counter: """Use `otel.sdk.span.started` minus `otel.sdk.span.live` to derive this value""" return meter.create_counter( name=OTEL_SDK_SPAN_ENDED_COUNT, description="Use `otel.sdk.span.started` minus `otel.sdk.span.live` to derive this value.", unit="{span}", ) OTEL_SDK_SPAN_LIVE: Final = "otel.sdk.span.live" """ The number of created spans with `recording=true` for which the end operation has not been called yet Instrument: updowncounter Unit: {span} """ def create_otel_sdk_span_live(meter: Meter) -> UpDownCounter: """The number of created spans with `recording=true` for which the end operation has not been called yet""" return meter.create_up_down_counter( name=OTEL_SDK_SPAN_LIVE, description="The number of created spans with `recording=true` for which the end operation has not been called yet.", unit="{span}", ) OTEL_SDK_SPAN_LIVE_COUNT: Final = "otel.sdk.span.live.count" """ Deprecated: Replaced by `otel.sdk.span.live`. """ def create_otel_sdk_span_live_count(meter: Meter) -> UpDownCounter: """Deprecated, use `otel.sdk.span.live` instead""" return meter.create_up_down_counter( name=OTEL_SDK_SPAN_LIVE_COUNT, description="Deprecated, use `otel.sdk.span.live` instead.", unit="{span}", ) OTEL_SDK_SPAN_STARTED: Final = "otel.sdk.span.started" """ The number of created spans Instrument: counter Unit: {span} Note: Implementations MUST record this metric for all spans, even for non-recording ones. """ def create_otel_sdk_span_started(meter: Meter) -> Counter: """The number of created spans""" return meter.create_counter( name=OTEL_SDK_SPAN_STARTED, description="The number of created spans.", unit="{span}", ) process_metrics.py000066400000000000000000000141211511654350100416440ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import ( Callable, Final, Generator, Iterable, Optional, Sequence, Union, ) from opentelemetry.metrics import ( CallbackOptions, Counter, Meter, ObservableGauge, Observation, UpDownCounter, ) # pylint: disable=invalid-name CallbackT = Union[ Callable[[CallbackOptions], Iterable[Observation]], Generator[Iterable[Observation], CallbackOptions, None], ] PROCESS_CONTEXT_SWITCHES: Final = "process.context_switches" """ Number of times the process has been context switched Instrument: counter Unit: {context_switch} """ def create_process_context_switches(meter: Meter) -> Counter: """Number of times the process has been context switched""" return meter.create_counter( name=PROCESS_CONTEXT_SWITCHES, description="Number of times the process has been context switched.", unit="{context_switch}", ) PROCESS_CPU_TIME: Final = "process.cpu.time" """ Total CPU seconds broken down by different states Instrument: counter Unit: s """ def create_process_cpu_time(meter: Meter) -> Counter: """Total CPU seconds broken down by different states""" return meter.create_counter( name=PROCESS_CPU_TIME, description="Total CPU seconds broken down by different states.", unit="s", ) PROCESS_CPU_UTILIZATION: Final = "process.cpu.utilization" """ Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process Instrument: gauge Unit: 1 """ def create_process_cpu_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process""" return meter.create_observable_gauge( name=PROCESS_CPU_UTILIZATION, callbacks=callbacks, description="Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process.", unit="1", ) PROCESS_DISK_IO: Final = "process.disk.io" """ Disk bytes transferred Instrument: counter Unit: By """ def create_process_disk_io(meter: Meter) -> Counter: """Disk bytes transferred""" return meter.create_counter( name=PROCESS_DISK_IO, description="Disk bytes transferred.", unit="By", ) PROCESS_MEMORY_USAGE: Final = "process.memory.usage" """ The amount of physical memory in use Instrument: updowncounter Unit: By """ def create_process_memory_usage(meter: Meter) -> UpDownCounter: """The amount of physical memory in use""" return meter.create_up_down_counter( name=PROCESS_MEMORY_USAGE, description="The amount of physical memory in use.", unit="By", ) PROCESS_MEMORY_VIRTUAL: Final = "process.memory.virtual" """ The amount of committed virtual memory Instrument: updowncounter Unit: By """ def create_process_memory_virtual(meter: Meter) -> UpDownCounter: """The amount of committed virtual memory""" return meter.create_up_down_counter( name=PROCESS_MEMORY_VIRTUAL, description="The amount of committed virtual memory.", unit="By", ) PROCESS_NETWORK_IO: Final = "process.network.io" """ Network bytes transferred Instrument: counter Unit: By """ def create_process_network_io(meter: Meter) -> Counter: """Network bytes transferred""" return meter.create_counter( name=PROCESS_NETWORK_IO, description="Network bytes transferred.", unit="By", ) PROCESS_OPEN_FILE_DESCRIPTOR_COUNT: Final = ( "process.open_file_descriptor.count" ) """ Number of file descriptors in use by the process Instrument: updowncounter Unit: {file_descriptor} """ def create_process_open_file_descriptor_count(meter: Meter) -> UpDownCounter: """Number of file descriptors 
in use by the process""" return meter.create_up_down_counter( name=PROCESS_OPEN_FILE_DESCRIPTOR_COUNT, description="Number of file descriptors in use by the process.", unit="{file_descriptor}", ) PROCESS_PAGING_FAULTS: Final = "process.paging.faults" """ Number of page faults the process has made Instrument: counter Unit: {fault} """ def create_process_paging_faults(meter: Meter) -> Counter: """Number of page faults the process has made""" return meter.create_counter( name=PROCESS_PAGING_FAULTS, description="Number of page faults the process has made.", unit="{fault}", ) PROCESS_THREAD_COUNT: Final = "process.thread.count" """ Process threads count Instrument: updowncounter Unit: {thread} """ def create_process_thread_count(meter: Meter) -> UpDownCounter: """Process threads count""" return meter.create_up_down_counter( name=PROCESS_THREAD_COUNT, description="Process threads count.", unit="{thread}", ) PROCESS_UPTIME: Final = "process.uptime" """ The time the process has been running Instrument: gauge Unit: s Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. The actual accuracy would depend on the instrumentation and operating system. """ def create_process_uptime( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The time the process has been running""" return meter.create_observable_gauge( name=PROCESS_UPTIME, callbacks=callbacks, description="The time the process has been running.", unit="s", ) rpc_metrics.py000066400000000000000000000127511511654350100407610ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final from opentelemetry.metrics import Histogram, Meter RPC_CLIENT_DURATION: Final = "rpc.client.duration" """ Measures the duration of outbound RPC Instrument: histogram Unit: ms Note: While streaming RPCs may record this metric as start-of-batch to end-of-batch, it's hard to interpret in practice. **Streaming**: N/A. """ def create_rpc_client_duration(meter: Meter) -> Histogram: """Measures the duration of outbound RPC""" return meter.create_histogram( name=RPC_CLIENT_DURATION, description="Measures the duration of outbound RPC.", unit="ms", ) RPC_CLIENT_REQUEST_SIZE: Final = "rpc.client.request.size" """ Measures the size of RPC request messages (uncompressed) Instrument: histogram Unit: By Note: **Streaming**: Recorded per message in a streaming batch. """ def create_rpc_client_request_size(meter: Meter) -> Histogram: """Measures the size of RPC request messages (uncompressed)""" return meter.create_histogram( name=RPC_CLIENT_REQUEST_SIZE, description="Measures the size of RPC request messages (uncompressed).", unit="By", ) RPC_CLIENT_REQUESTS_PER_RPC: Final = "rpc.client.requests_per_rpc" """ Deprecated: Removed, no replacement at this time. 
""" def create_rpc_client_requests_per_rpc(meter: Meter) -> Histogram: """Measures the number of messages received per RPC""" return meter.create_histogram( name=RPC_CLIENT_REQUESTS_PER_RPC, description="Measures the number of messages received per RPC.", unit="{count}", ) RPC_CLIENT_RESPONSE_SIZE: Final = "rpc.client.response.size" """ Measures the size of RPC response messages (uncompressed) Instrument: histogram Unit: By Note: **Streaming**: Recorded per response in a streaming batch. """ def create_rpc_client_response_size(meter: Meter) -> Histogram: """Measures the size of RPC response messages (uncompressed)""" return meter.create_histogram( name=RPC_CLIENT_RESPONSE_SIZE, description="Measures the size of RPC response messages (uncompressed).", unit="By", ) RPC_CLIENT_RESPONSES_PER_RPC: Final = "rpc.client.responses_per_rpc" """ Deprecated: Removed, no replacement at this time. """ def create_rpc_client_responses_per_rpc(meter: Meter) -> Histogram: """Measures the number of messages sent per RPC""" return meter.create_histogram( name=RPC_CLIENT_RESPONSES_PER_RPC, description="Measures the number of messages sent per RPC.", unit="{count}", ) RPC_SERVER_DURATION: Final = "rpc.server.duration" """ Measures the duration of inbound RPC Instrument: histogram Unit: ms Note: While streaming RPCs may record this metric as start-of-batch to end-of-batch, it's hard to interpret in practice. **Streaming**: N/A. """ def create_rpc_server_duration(meter: Meter) -> Histogram: """Measures the duration of inbound RPC""" return meter.create_histogram( name=RPC_SERVER_DURATION, description="Measures the duration of inbound RPC.", unit="ms", ) RPC_SERVER_REQUEST_SIZE: Final = "rpc.server.request.size" """ Measures the size of RPC request messages (uncompressed) Instrument: histogram Unit: By Note: **Streaming**: Recorded per message in a streaming batch. """ def create_rpc_server_request_size(meter: Meter) -> Histogram: """Measures the size of RPC request messages (uncompressed)""" return meter.create_histogram( name=RPC_SERVER_REQUEST_SIZE, description="Measures the size of RPC request messages (uncompressed).", unit="By", ) RPC_SERVER_REQUESTS_PER_RPC: Final = "rpc.server.requests_per_rpc" """ Deprecated: Removed, no replacement at this time. """ def create_rpc_server_requests_per_rpc(meter: Meter) -> Histogram: """Measures the number of messages received per RPC""" return meter.create_histogram( name=RPC_SERVER_REQUESTS_PER_RPC, description="Measures the number of messages received per RPC.", unit="{count}", ) RPC_SERVER_RESPONSE_SIZE: Final = "rpc.server.response.size" """ Measures the size of RPC response messages (uncompressed) Instrument: histogram Unit: By Note: **Streaming**: Recorded per response in a streaming batch. """ def create_rpc_server_response_size(meter: Meter) -> Histogram: """Measures the size of RPC response messages (uncompressed)""" return meter.create_histogram( name=RPC_SERVER_RESPONSE_SIZE, description="Measures the size of RPC response messages (uncompressed).", unit="By", ) RPC_SERVER_RESPONSES_PER_RPC: Final = "rpc.server.responses_per_rpc" """ Deprecated: Removed, no replacement at this time. 
""" def create_rpc_server_responses_per_rpc(meter: Meter) -> Histogram: """Measures the number of messages sent per RPC""" return meter.create_histogram( name=RPC_SERVER_RESPONSES_PER_RPC, description="Measures the number of messages sent per RPC.", unit="{count}", ) system_metrics.py000066400000000000000000000505451511654350100415240ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import ( Callable, Final, Generator, Iterable, Optional, Sequence, Union, ) from opentelemetry.metrics import ( CallbackOptions, Counter, Meter, ObservableGauge, Observation, UpDownCounter, ) # pylint: disable=invalid-name CallbackT = Union[ Callable[[CallbackOptions], Iterable[Observation]], Generator[Iterable[Observation], CallbackOptions, None], ] SYSTEM_CPU_FREQUENCY: Final = "system.cpu.frequency" """ Operating frequency of the logical CPU in Hertz Instrument: gauge Unit: Hz """ def create_system_cpu_frequency( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Operating frequency of the logical CPU in Hertz""" return meter.create_observable_gauge( name=SYSTEM_CPU_FREQUENCY, callbacks=callbacks, description="Operating frequency of the logical CPU in Hertz.", unit="Hz", ) SYSTEM_CPU_LOGICAL_COUNT: Final = "system.cpu.logical.count" """ Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking Instrument: updowncounter Unit: {cpu} Note: Calculated by multiplying the number of sockets by the number of cores per socket, and then by the number of threads per core. """ def create_system_cpu_logical_count(meter: Meter) -> UpDownCounter: """Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking""" return meter.create_up_down_counter( name=SYSTEM_CPU_LOGICAL_COUNT, description="Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking.", unit="{cpu}", ) SYSTEM_CPU_PHYSICAL_COUNT: Final = "system.cpu.physical.count" """ Reports the number of actual physical processor cores on the hardware Instrument: updowncounter Unit: {cpu} Note: Calculated by multiplying the number of sockets by the number of cores per socket. 
""" def create_system_cpu_physical_count(meter: Meter) -> UpDownCounter: """Reports the number of actual physical processor cores on the hardware""" return meter.create_up_down_counter( name=SYSTEM_CPU_PHYSICAL_COUNT, description="Reports the number of actual physical processor cores on the hardware.", unit="{cpu}", ) SYSTEM_CPU_TIME: Final = "system.cpu.time" """ Seconds each logical CPU spent on each mode Instrument: counter Unit: s """ def create_system_cpu_time(meter: Meter) -> Counter: """Seconds each logical CPU spent on each mode""" return meter.create_counter( name=SYSTEM_CPU_TIME, description="Seconds each logical CPU spent on each mode.", unit="s", ) SYSTEM_CPU_UTILIZATION: Final = "system.cpu.utilization" """ For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time Instrument: gauge Unit: 1 """ def create_system_cpu_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time""" return meter.create_observable_gauge( name=SYSTEM_CPU_UTILIZATION, callbacks=callbacks, description="For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time.", unit="1", ) SYSTEM_DISK_IO: Final = "system.disk.io" """ Disk bytes transferred Instrument: counter Unit: By """ def create_system_disk_io(meter: Meter) -> Counter: """Disk bytes transferred""" return meter.create_counter( name=SYSTEM_DISK_IO, description="Disk bytes transferred.", unit="By", ) SYSTEM_DISK_IO_TIME: Final = "system.disk.io_time" """ Time disk spent activated Instrument: counter Unit: s Note: The real elapsed time ("wall clock") used in the I/O path (time from operations running in parallel are not counted). Measured as: - Linux: Field 13 from [procfs-diskstats](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats) - Windows: The complement of ["Disk\\% Idle Time"](https://learn.microsoft.com/archive/blogs/askcore/windows-performance-monitor-disk-counters-explained#windows-performance-monitor-disk-counters-explained) performance counter: `uptime * (100 - "Disk\\% Idle Time") / 100`. 
""" def create_system_disk_io_time(meter: Meter) -> Counter: """Time disk spent activated""" return meter.create_counter( name=SYSTEM_DISK_IO_TIME, description="Time disk spent activated.", unit="s", ) SYSTEM_DISK_LIMIT: Final = "system.disk.limit" """ The total storage capacity of the disk Instrument: updowncounter Unit: By """ def create_system_disk_limit(meter: Meter) -> UpDownCounter: """The total storage capacity of the disk""" return meter.create_up_down_counter( name=SYSTEM_DISK_LIMIT, description="The total storage capacity of the disk.", unit="By", ) SYSTEM_DISK_MERGED: Final = "system.disk.merged" """ The number of disk reads/writes merged into single physical disk access operations Instrument: counter Unit: {operation} """ def create_system_disk_merged(meter: Meter) -> Counter: """The number of disk reads/writes merged into single physical disk access operations""" return meter.create_counter( name=SYSTEM_DISK_MERGED, description="The number of disk reads/writes merged into single physical disk access operations.", unit="{operation}", ) SYSTEM_DISK_OPERATION_TIME: Final = "system.disk.operation_time" """ Sum of the time each operation took to complete Instrument: counter Unit: s Note: Because it is the sum of time each request took, parallel-issued requests each contribute to make the count grow. Measured as: - Linux: Fields 7 & 11 from [procfs-diskstats](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats) - Windows: "Avg. Disk sec/Read" perf counter multiplied by "Disk Reads/sec" perf counter (similar for Writes). """ def create_system_disk_operation_time(meter: Meter) -> Counter: """Sum of the time each operation took to complete""" return meter.create_counter( name=SYSTEM_DISK_OPERATION_TIME, description="Sum of the time each operation took to complete.", unit="s", ) SYSTEM_DISK_OPERATIONS: Final = "system.disk.operations" """ Disk operations count Instrument: counter Unit: {operation} """ def create_system_disk_operations(meter: Meter) -> Counter: """Disk operations count""" return meter.create_counter( name=SYSTEM_DISK_OPERATIONS, description="Disk operations count.", unit="{operation}", ) SYSTEM_FILESYSTEM_LIMIT: Final = "system.filesystem.limit" """ The total storage capacity of the filesystem Instrument: updowncounter Unit: By """ def create_system_filesystem_limit(meter: Meter) -> UpDownCounter: """The total storage capacity of the filesystem""" return meter.create_up_down_counter( name=SYSTEM_FILESYSTEM_LIMIT, description="The total storage capacity of the filesystem.", unit="By", ) SYSTEM_FILESYSTEM_USAGE: Final = "system.filesystem.usage" """ Reports a filesystem's space usage across different states Instrument: updowncounter Unit: By Note: The sum of all `system.filesystem.usage` values over the different `system.filesystem.state` attributes SHOULD equal the total storage capacity of the filesystem, that is `system.filesystem.limit`. 
""" def create_system_filesystem_usage(meter: Meter) -> UpDownCounter: """Reports a filesystem's space usage across different states""" return meter.create_up_down_counter( name=SYSTEM_FILESYSTEM_USAGE, description="Reports a filesystem's space usage across different states.", unit="By", ) SYSTEM_FILESYSTEM_UTILIZATION: Final = "system.filesystem.utilization" """ Fraction of filesystem bytes used Instrument: gauge Unit: 1 """ def create_system_filesystem_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Fraction of filesystem bytes used""" return meter.create_observable_gauge( name=SYSTEM_FILESYSTEM_UTILIZATION, callbacks=callbacks, description="Fraction of filesystem bytes used.", unit="1", ) SYSTEM_LINUX_MEMORY_AVAILABLE: Final = "system.linux.memory.available" """ An estimate of how much memory is available for starting new applications, without causing swapping Instrument: updowncounter Unit: By Note: This is an alternative to `system.memory.usage` metric with `state=free`. Linux starting from 3.14 exports "available" memory. It takes "free" memory as a baseline, and then factors in kernel-specific values. This is supposed to be more accurate than just "free" memory. For reference, see the calculations [here](https://superuser.com/a/980821). See also `MemAvailable` in [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html). """ def create_system_linux_memory_available(meter: Meter) -> UpDownCounter: """An estimate of how much memory is available for starting new applications, without causing swapping""" return meter.create_up_down_counter( name=SYSTEM_LINUX_MEMORY_AVAILABLE, description="An estimate of how much memory is available for starting new applications, without causing swapping.", unit="By", ) SYSTEM_LINUX_MEMORY_SLAB_USAGE: Final = "system.linux.memory.slab.usage" """ Reports the memory used by the Linux kernel for managing caches of frequently used objects Instrument: updowncounter Unit: By Note: The sum over the `reclaimable` and `unreclaimable` state values in `linux.memory.slab.usage` SHOULD be equal to the total slab memory available on the system. Note that the total slab memory is not constant and may vary over time. See also the [Slab allocator](https://blogs.oracle.com/linux/post/understanding-linux-kernel-memory-statistics) and `Slab` in [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html). """ def create_system_linux_memory_slab_usage(meter: Meter) -> UpDownCounter: """Reports the memory used by the Linux kernel for managing caches of frequently used objects""" return meter.create_up_down_counter( name=SYSTEM_LINUX_MEMORY_SLAB_USAGE, description="Reports the memory used by the Linux kernel for managing caches of frequently used objects.", unit="By", ) SYSTEM_MEMORY_LIMIT: Final = "system.memory.limit" """ Total virtual memory available in the system Instrument: updowncounter Unit: By """ def create_system_memory_limit(meter: Meter) -> UpDownCounter: """Total virtual memory available in the system""" return meter.create_up_down_counter( name=SYSTEM_MEMORY_LIMIT, description="Total virtual memory available in the system.", unit="By", ) SYSTEM_MEMORY_SHARED: Final = "system.memory.shared" """ Shared memory used (mostly by tmpfs) Instrument: updowncounter Unit: By Note: Equivalent of `shared` from [`free` command](https://man7.org/linux/man-pages/man1/free.1.html) or `Shmem` from [`/proc/meminfo`](https://man7.org/linux/man-pages/man5/proc.5.html)". 
""" def create_system_memory_shared(meter: Meter) -> UpDownCounter: """Shared memory used (mostly by tmpfs)""" return meter.create_up_down_counter( name=SYSTEM_MEMORY_SHARED, description="Shared memory used (mostly by tmpfs).", unit="By", ) SYSTEM_MEMORY_USAGE: Final = "system.memory.usage" """ Reports memory in use by state Instrument: updowncounter Unit: By """ def create_system_memory_usage(meter: Meter) -> UpDownCounter: """Reports memory in use by state""" return meter.create_up_down_counter( name=SYSTEM_MEMORY_USAGE, description="Reports memory in use by state.", unit="By", ) SYSTEM_MEMORY_UTILIZATION: Final = "system.memory.utilization" """ Percentage of memory bytes in use Instrument: gauge Unit: 1 """ def create_system_memory_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Percentage of memory bytes in use""" return meter.create_observable_gauge( name=SYSTEM_MEMORY_UTILIZATION, callbacks=callbacks, description="Percentage of memory bytes in use.", unit="1", ) SYSTEM_NETWORK_CONNECTION_COUNT: Final = "system.network.connection.count" """ The number of connections Instrument: updowncounter Unit: {connection} """ def create_system_network_connection_count(meter: Meter) -> UpDownCounter: """The number of connections""" return meter.create_up_down_counter( name=SYSTEM_NETWORK_CONNECTION_COUNT, description="The number of connections.", unit="{connection}", ) SYSTEM_NETWORK_CONNECTIONS: Final = "system.network.connections" """ Deprecated: Replaced by `system.network.connection.count`. """ def create_system_network_connections(meter: Meter) -> UpDownCounter: """Deprecated, use `system.network.connection.count` instead""" return meter.create_up_down_counter( name=SYSTEM_NETWORK_CONNECTIONS, description="Deprecated, use `system.network.connection.count` instead.", unit="{connection}", ) SYSTEM_NETWORK_DROPPED: Final = "system.network.dropped" """ Deprecated: Replaced by `system.network.packet.dropped`. """ def create_system_network_dropped(meter: Meter) -> Counter: """Count of packets that are dropped or discarded even though there was no error""" return meter.create_counter( name=SYSTEM_NETWORK_DROPPED, description="Count of packets that are dropped or discarded even though there was no error.", unit="{packet}", ) SYSTEM_NETWORK_ERRORS: Final = "system.network.errors" """ Count of network errors detected Instrument: counter Unit: {error} Note: Measured as: - Linux: the `errs` column in `/proc/net/dev` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html)). - Windows: [`InErrors`/`OutErrors`](https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2) from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2). 
""" def create_system_network_errors(meter: Meter) -> Counter: """Count of network errors detected""" return meter.create_counter( name=SYSTEM_NETWORK_ERRORS, description="Count of network errors detected.", unit="{error}", ) SYSTEM_NETWORK_IO: Final = "system.network.io" """ The number of bytes transmitted and received Instrument: counter Unit: By """ def create_system_network_io(meter: Meter) -> Counter: """The number of bytes transmitted and received""" return meter.create_counter( name=SYSTEM_NETWORK_IO, description="The number of bytes transmitted and received.", unit="By", ) SYSTEM_NETWORK_PACKET_COUNT: Final = "system.network.packet.count" """ The number of packets transferred Instrument: counter Unit: {packet} """ def create_system_network_packet_count(meter: Meter) -> Counter: """The number of packets transferred""" return meter.create_counter( name=SYSTEM_NETWORK_PACKET_COUNT, description="The number of packets transferred.", unit="{packet}", ) SYSTEM_NETWORK_PACKET_DROPPED: Final = "system.network.packet.dropped" """ Count of packets that are dropped or discarded even though there was no error Instrument: counter Unit: {packet} Note: Measured as: - Linux: the `drop` column in `/proc/net/dev` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html)) - Windows: [`InDiscards`/`OutDiscards`](https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2) from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2). """ def create_system_network_packet_dropped(meter: Meter) -> Counter: """Count of packets that are dropped or discarded even though there was no error""" return meter.create_counter( name=SYSTEM_NETWORK_PACKET_DROPPED, description="Count of packets that are dropped or discarded even though there was no error.", unit="{packet}", ) SYSTEM_NETWORK_PACKETS: Final = "system.network.packets" """ Deprecated: Replaced by `system.network.packet.count`. 
""" def create_system_network_packets(meter: Meter) -> Counter: """The number of packets transferred""" return meter.create_counter( name=SYSTEM_NETWORK_PACKETS, description="The number of packets transferred.", unit="{packet}", ) SYSTEM_PAGING_FAULTS: Final = "system.paging.faults" """ The number of page faults Instrument: counter Unit: {fault} """ def create_system_paging_faults(meter: Meter) -> Counter: """The number of page faults""" return meter.create_counter( name=SYSTEM_PAGING_FAULTS, description="The number of page faults.", unit="{fault}", ) SYSTEM_PAGING_OPERATIONS: Final = "system.paging.operations" """ The number of paging operations Instrument: counter Unit: {operation} """ def create_system_paging_operations(meter: Meter) -> Counter: """The number of paging operations""" return meter.create_counter( name=SYSTEM_PAGING_OPERATIONS, description="The number of paging operations.", unit="{operation}", ) SYSTEM_PAGING_USAGE: Final = "system.paging.usage" """ Unix swap or windows pagefile usage Instrument: updowncounter Unit: By """ def create_system_paging_usage(meter: Meter) -> UpDownCounter: """Unix swap or windows pagefile usage""" return meter.create_up_down_counter( name=SYSTEM_PAGING_USAGE, description="Unix swap or windows pagefile usage.", unit="By", ) SYSTEM_PAGING_UTILIZATION: Final = "system.paging.utilization" """ Swap (unix) or pagefile (windows) utilization Instrument: gauge Unit: 1 """ def create_system_paging_utilization( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Swap (unix) or pagefile (windows) utilization""" return meter.create_observable_gauge( name=SYSTEM_PAGING_UTILIZATION, callbacks=callbacks, description="Swap (unix) or pagefile (windows) utilization.", unit="1", ) SYSTEM_PROCESS_COUNT: Final = "system.process.count" """ Total number of processes in each state Instrument: updowncounter Unit: {process} """ def create_system_process_count(meter: Meter) -> UpDownCounter: """Total number of processes in each state""" return meter.create_up_down_counter( name=SYSTEM_PROCESS_COUNT, description="Total number of processes in each state.", unit="{process}", ) SYSTEM_PROCESS_CREATED: Final = "system.process.created" """ Total number of processes created over uptime of the host Instrument: counter Unit: {process} """ def create_system_process_created(meter: Meter) -> Counter: """Total number of processes created over uptime of the host""" return meter.create_counter( name=SYSTEM_PROCESS_CREATED, description="Total number of processes created over uptime of the host.", unit="{process}", ) SYSTEM_UPTIME: Final = "system.uptime" """ The time the system has been running Instrument: gauge Unit: s Note: Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. The actual accuracy would depend on the instrumentation and operating system. 
""" def create_system_uptime( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The time the system has been running""" return meter.create_observable_gauge( name=SYSTEM_UPTIME, callbacks=callbacks, description="The time the system has been running.", unit="s", ) vcs_metrics.py000066400000000000000000000176731511654350100410000ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/_incubating/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import ( Callable, Final, Generator, Iterable, Optional, Sequence, Union, ) from opentelemetry.metrics import ( CallbackOptions, Meter, ObservableGauge, Observation, UpDownCounter, ) # pylint: disable=invalid-name CallbackT = Union[ Callable[[CallbackOptions], Iterable[Observation]], Generator[Iterable[Observation], CallbackOptions, None], ] VCS_CHANGE_COUNT: Final = "vcs.change.count" """ The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged) Instrument: updowncounter Unit: {change} """ def create_vcs_change_count(meter: Meter) -> UpDownCounter: """The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)""" return meter.create_up_down_counter( name=VCS_CHANGE_COUNT, description="The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. 
open or merged).", unit="{change}", ) VCS_CHANGE_DURATION: Final = "vcs.change.duration" """ The time duration a change (pull request/merge request/changelist) has been in a given state Instrument: gauge Unit: s """ def create_vcs_change_duration( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The time duration a change (pull request/merge request/changelist) has been in a given state""" return meter.create_observable_gauge( name=VCS_CHANGE_DURATION, callbacks=callbacks, description="The time duration a change (pull request/merge request/changelist) has been in a given state.", unit="s", ) VCS_CHANGE_TIME_TO_APPROVAL: Final = "vcs.change.time_to_approval" """ The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval Instrument: gauge Unit: s """ def create_vcs_change_time_to_approval( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval""" return meter.create_observable_gauge( name=VCS_CHANGE_TIME_TO_APPROVAL, callbacks=callbacks, description="The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval.", unit="s", ) VCS_CHANGE_TIME_TO_MERGE: Final = "vcs.change.time_to_merge" """ The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref Instrument: gauge Unit: s """ def create_vcs_change_time_to_merge( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref""" return meter.create_observable_gauge( name=VCS_CHANGE_TIME_TO_MERGE, callbacks=callbacks, description="The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref.", unit="s", ) VCS_CONTRIBUTOR_COUNT: Final = "vcs.contributor.count" """ The number of unique contributors to a repository Instrument: gauge Unit: {contributor} """ def create_vcs_contributor_count( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The number of unique contributors to a repository""" return meter.create_observable_gauge( name=VCS_CONTRIBUTOR_COUNT, callbacks=callbacks, description="The number of unique contributors to a repository.", unit="{contributor}", ) VCS_REF_COUNT: Final = "vcs.ref.count" """ The number of refs of type branch or tag in a repository Instrument: updowncounter Unit: {ref} """ def create_vcs_ref_count(meter: Meter) -> UpDownCounter: """The number of refs of type branch or tag in a repository""" return meter.create_up_down_counter( name=VCS_REF_COUNT, description="The number of refs of type branch or tag in a repository.", unit="{ref}", ) VCS_REF_LINES_DELTA: Final = "vcs.ref.lines_delta" """ The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute Instrument: gauge Unit: {line} Note: This metric should be reported for each `vcs.line_change.type` value. For example if a ref added 3 lines and removed 2 lines, instrumentation SHOULD report two measurements: 3 and 2 (both positive numbers). If number of lines added/removed should be calculated from the start of time, then `vcs.ref.base.name` SHOULD be set to an empty string. 
""" def create_vcs_ref_lines_delta( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute""" return meter.create_observable_gauge( name=VCS_REF_LINES_DELTA, callbacks=callbacks, description="The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute.", unit="{line}", ) VCS_REF_REVISIONS_DELTA: Final = "vcs.ref.revisions_delta" """ The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute Instrument: gauge Unit: {revision} Note: This metric should be reported for each `vcs.revision_delta.direction` value. For example if branch `a` is 3 commits behind and 2 commits ahead of `trunk`, instrumentation SHOULD report two measurements: 3 and 2 (both positive numbers) and `vcs.ref.base.name` is set to `trunk`. """ def create_vcs_ref_revisions_delta( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute""" return meter.create_observable_gauge( name=VCS_REF_REVISIONS_DELTA, callbacks=callbacks, description="The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute.", unit="{revision}", ) VCS_REF_TIME: Final = "vcs.ref.time" """ Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch` Instrument: gauge Unit: s """ def create_vcs_ref_time( meter: Meter, callbacks: Optional[Sequence[CallbackT]] ) -> ObservableGauge: """Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`""" return meter.create_observable_gauge( name=VCS_REF_TIME, callbacks=callbacks, description="Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`.", unit="s", ) VCS_REPOSITORY_COUNT: Final = "vcs.repository.count" """ The number of repositories in an organization Instrument: updowncounter Unit: {repository} """ def create_vcs_repository_count(meter: Meter) -> UpDownCounter: """The number of repositories in an organization""" return meter.create_up_down_counter( name=VCS_REPOSITORY_COUNT, description="The number of repositories in an organization.", unit="{repository}", ) python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes/000077500000000000000000000000001511654350100344045ustar00rootroot00000000000000__init__.py000066400000000000000000000000001511654350100364240ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributesclient_attributes.py000066400000000000000000000023541511654350100404270ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from typing import Final CLIENT_ADDRESS: Final = "client.address" """ Client address - domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. Note: When observed from the server side, and when communicating through an intermediary, `client.address` SHOULD represent the client address behind any intermediaries, for example proxies, if it's available. """ CLIENT_PORT: Final = "client.port" """ Client port number. Note: When observed from the server side, and when communicating through an intermediary, `client.port` SHOULD represent the client port behind any intermediaries, for example proxies, if it's available. """ code_attributes.py000066400000000000000000000064451511654350100400700ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final CODE_COLUMN_NUMBER: Final = "code.column.number" """ The column number in `code.file.path` best representing the operation. It SHOULD point within the code unit named in `code.function.name`. This attribute MUST NOT be used on the Profile signal since the data is already captured in 'message Line'. This constraint is imposed to prevent redundancy and maintain data integrity. """ CODE_FILE_PATH: Final = "code.file.path" """ The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path). This attribute MUST NOT be used on the Profile signal since the data is already captured in 'message Function'. This constraint is imposed to prevent redundancy and maintain data integrity. """ CODE_FUNCTION_NAME: Final = "code.function.name" """ The method or function fully-qualified name without arguments. The value should fit the natural representation of the language runtime, which is also likely the same used within `code.stacktrace` attribute value. This attribute MUST NOT be used on the Profile signal since the data is already captured in 'message Function'. This constraint is imposed to prevent redundancy and maintain data integrity. Note: Values and format depends on each language runtime, thus it is impossible to provide an exhaustive list of examples. The values are usually the same (or prefixes of) the ones found in native stack trace representation stored in `code.stacktrace` without information on arguments. Examples: * Java method: `com.example.MyHttpService.serveRequest` * Java anonymous class method: `com.mycompany.Main$1.myMethod` * Java lambda method: `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` * PHP function: `GuzzleHttp\\Client::transfer` * Go function: `github.com/my/repo/pkg.foo.func5` * Elixir: `OpenTelemetry.Ctx.new` * Erlang: `opentelemetry_ctx:new` * Rust: `playground::my_module::my_cool_func` * C function: `fopen`. 
""" CODE_LINE_NUMBER: Final = "code.line.number" """ The line number in `code.file.path` best representing the operation. It SHOULD point within the code unit named in `code.function.name`. This attribute MUST NOT be used on the Profile signal since the data is already captured in 'message Line'. This constraint is imposed to prevent redundancy and maintain data integrity. """ CODE_STACKTRACE: Final = "code.stacktrace" """ A stacktrace as a string in the natural representation for the language runtime. The representation is identical to [`exception.stacktrace`](/docs/exceptions/exceptions-spans.md#stacktrace-representation). This attribute MUST NOT be used on the Profile signal since the data is already captured in 'message Location'. This constraint is imposed to prevent redundancy and maintain data integrity. """ db_attributes.py000066400000000000000000000133661511654350100375430ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final DB_COLLECTION_NAME: Final = "db.collection.name" """ The name of a collection (table, container) within the database. Note: It is RECOMMENDED to capture the value as provided by the application without attempting to do any case normalization. The collection name SHOULD NOT be extracted from `db.query.text`, when the database system supports query text with multiple collections in non-batch operations. For batch operations, if the individual operations are known to have the same collection name then that collection name SHOULD be used. """ DB_NAMESPACE: Final = "db.namespace" """ The name of the database, fully qualified within the server address and port. Note: If a database system has multiple namespace components, they SHOULD be concatenated from the most general to the most specific namespace component, using `|` as a separator between the components. Any missing components (and their associated separators) SHOULD be omitted. Semantic conventions for individual database systems SHOULD document what `db.namespace` means in the context of that system. It is RECOMMENDED to capture the value as provided by the application without attempting to do any case normalization. """ DB_OPERATION_BATCH_SIZE: Final = "db.operation.batch.size" """ The number of queries included in a batch operation. Note: Operations are only considered batches when they contain two or more operations, and so `db.operation.batch.size` SHOULD never be `1`. """ DB_OPERATION_NAME: Final = "db.operation.name" """ The name of the operation or command being executed. Note: It is RECOMMENDED to capture the value as provided by the application without attempting to do any case normalization. The operation name SHOULD NOT be extracted from `db.query.text`, when the database system supports query text with multiple operations in non-batch operations. 
If spaces can occur in the operation name, multiple consecutive spaces SHOULD be normalized to a single space. For batch operations, if the individual operations are known to have the same operation name then that operation name SHOULD be used prepended by `BATCH `, otherwise `db.operation.name` SHOULD be `BATCH` or some other database system specific term if more applicable. """ DB_QUERY_SUMMARY: Final = "db.query.summary" """ Low cardinality summary of a database query. Note: The query summary describes a class of database queries and is useful as a grouping key, especially when analyzing telemetry for database calls involving complex queries. Summary may be available to the instrumentation through instrumentation hooks or other means. If it is not available, instrumentations that support query parsing SHOULD generate a summary following [Generating query summary](/docs/database/database-spans.md#generating-a-summary-of-the-query) section. """ DB_QUERY_TEXT: Final = "db.query.text" """ The database query being executed. Note: For sanitization see [Sanitization of `db.query.text`](/docs/database/database-spans.md#sanitization-of-dbquerytext). For batch operations, if the individual operations are known to have the same query text then that query text SHOULD be used, otherwise all of the individual query texts SHOULD be concatenated with separator `; ` or some other database system specific separator if more applicable. Parameterized query text SHOULD NOT be sanitized. Even though parameterized query text can potentially have sensitive data, by using a parameterized query the user is giving a strong signal that any sensitive data will be passed as parameter values, and the benefit to observability of capturing the static part of the query text by default outweighs the risk. """ DB_RESPONSE_STATUS_CODE: Final = "db.response.status_code" """ Database response status code. Note: The status code returned by the database. Usually it represents an error code, but may also represent partial success, warning, or differentiate between various types of successful outcomes. Semantic conventions for individual database systems SHOULD document what `db.response.status_code` means in the context of that system. """ DB_STORED_PROCEDURE_NAME: Final = "db.stored_procedure.name" """ The name of a stored procedure within the database. Note: It is RECOMMENDED to capture the value as provided by the application without attempting to do any case normalization. For batch operations, if the individual operations are known to have the same stored procedure name then that stored procedure name SHOULD be used. """ DB_SYSTEM_NAME: Final = "db.system.name" """ The database management system (DBMS) product as identified by the client instrumentation. Note: The actual DBMS may differ from the one identified by the client. For example, when using PostgreSQL client libraries to connect to a CockroachDB, the `db.system.name` is set to `postgresql` based on the instrumentation's best knowledge. 
""" class DbSystemNameValues(Enum): MARIADB = "mariadb" """[MariaDB](https://mariadb.org/).""" MICROSOFT_SQL_SERVER = "microsoft.sql_server" """[Microsoft SQL Server](https://www.microsoft.com/sql-server).""" MYSQL = "mysql" """[MySQL](https://www.mysql.com/).""" POSTGRESQL = "postgresql" """[PostgreSQL](https://www.postgresql.org/).""" error_attributes.py000066400000000000000000000034501511654350100403000ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final ERROR_TYPE: Final = "error.type" """ Describes a class of error the operation ended with. Note: The `error.type` SHOULD be predictable, and SHOULD have low cardinality. When `error.type` is set to a type (e.g., an exception type), its canonical class name identifying the type within the artifact SHOULD be used. Instrumentations SHOULD document the list of errors they report. The cardinality of `error.type` within one instrumentation library SHOULD be low. Telemetry consumers that aggregate data from multiple instrumentation libraries and applications should be prepared for `error.type` to have high cardinality at query time when no additional filters are applied. If the operation has completed successfully, instrumentations SHOULD NOT set `error.type`. If a specific domain defines its own set of error identifiers (such as HTTP or gRPC status codes), it's RECOMMENDED to: - Use a domain-specific attribute - Set `error.type` to capture all errors, regardless of whether they are defined within the domain-specific set or not. """ class ErrorTypeValues(Enum): OTHER = "_OTHER" """A fallback error value to be used when the instrumentation doesn't define a custom value.""" exception_attributes.py000066400000000000000000000024361511654350100411500ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final EXCEPTION_ESCAPED: Final = "exception.escaped" """ Deprecated: It's no longer recommended to record exceptions that are handled and do not escape the scope of a span. """ EXCEPTION_MESSAGE: Final = "exception.message" """ The exception message. """ EXCEPTION_STACKTRACE: Final = "exception.stacktrace" """ A stacktrace as a string in the natural representation for the language runtime. 
The representation is to be determined and documented by each language SIG. """ EXCEPTION_TYPE: Final = "exception.type" """ The type of the exception (its fully-qualified class name, if applicable). The dynamic type of the exception should be preferred over the static type in languages that support it. """ http_attributes.py000066400000000000000000000153201511654350100401250ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final HTTP_REQUEST_HEADER_TEMPLATE: Final = "http.request.header" """ HTTP request headers, `<key>` being the normalized HTTP Header name (lowercase), the value being the header values. Note: Instrumentations SHOULD require an explicit configuration of which headers are to be captured. Including all request headers can be a security risk - explicit configuration helps avoid leaking sensitive information. The `User-Agent` header is already captured in the `user_agent.original` attribute. Users MAY explicitly configure instrumentations to capture them even though it is not recommended. The attribute value MUST consist of either multiple header values as an array of strings or a single-item array containing a possibly comma-concatenated string, depending on the way the HTTP library provides access to headers. Examples: - A header `Content-Type: application/json` SHOULD be recorded as the `http.request.header.content-type` attribute with value `["application/json"]`. - A header `X-Forwarded-For: 1.2.3.4, 1.2.3.5` SHOULD be recorded as the `http.request.header.x-forwarded-for` attribute with value `["1.2.3.4", "1.2.3.5"]` or `["1.2.3.4, 1.2.3.5"]` depending on the HTTP library. """ HTTP_REQUEST_METHOD: Final = "http.request.method" """ HTTP request method. Note: HTTP request method value SHOULD be "known" to the instrumentation. By default, this convention defines "known" methods as the ones listed in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods), the PATCH method defined in [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html) and the QUERY method defined in [httpbis-safe-method-w-body](https://datatracker.ietf.org/doc/draft-ietf-httpbis-safe-method-w-body/?include_text=1). If the HTTP request method is not known to instrumentation, it MUST set the `http.request.method` attribute to `_OTHER`. If the HTTP instrumentation could end up converting valid HTTP request methods to `_OTHER`, then it MUST provide a way to override the list of known HTTP methods. If this override is done via environment variable, then the environment variable MUST be named OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of case-sensitive known HTTP methods (this list MUST be a full override of the default known methods, it is not a list of known methods in addition to the defaults).
HTTP method names are case-sensitive and `http.request.method` attribute value MUST match a known HTTP method name exactly. Instrumentations for specific web frameworks that consider HTTP methods to be case insensitive SHOULD populate a canonical equivalent. Tracing instrumentations that do so MUST also set `http.request.method_original` to the original value. """ HTTP_REQUEST_METHOD_ORIGINAL: Final = "http.request.method_original" """ Original HTTP method sent by the client in the request line. """ HTTP_REQUEST_RESEND_COUNT: Final = "http.request.resend_count" """ The ordinal number of request resending attempt (for any reason, including redirects). Note: The resend count SHOULD be updated each time an HTTP request gets resent by the client, regardless of what was the cause of the resending (e.g. redirection, authorization failure, 503 Server Unavailable, network issues, or any other). """ HTTP_RESPONSE_HEADER_TEMPLATE: Final = "http.response.header" """ HTTP response headers, `<key>` being the normalized HTTP Header name (lowercase), the value being the header values. Note: Instrumentations SHOULD require an explicit configuration of which headers are to be captured. Including all response headers can be a security risk - explicit configuration helps avoid leaking sensitive information. Users MAY explicitly configure instrumentations to capture them even though it is not recommended. The attribute value MUST consist of either multiple header values as an array of strings or a single-item array containing a possibly comma-concatenated string, depending on the way the HTTP library provides access to headers. Examples: - A header `Content-Type: application/json` SHOULD be recorded as the `http.response.header.content-type` attribute with value `["application/json"]`. - A header `My-custom-header: abc, def` SHOULD be recorded as the `http.response.header.my-custom-header` attribute with value `["abc", "def"]` or `["abc, def"]` depending on the HTTP library. """ HTTP_RESPONSE_STATUS_CODE: Final = "http.response.status_code" """ [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). """ HTTP_ROUTE: Final = "http.route" """ The matched route template for the request. This MUST be low-cardinality and include all static path segments, with dynamic path segments represented with placeholders. Note: MUST NOT be populated when this is not supported by the HTTP server framework as the route attribute should have low-cardinality and the URI path can NOT substitute it. SHOULD include the [application root](/docs/http/http-spans.md#http-server-definitions) if there is one. A static path segment is a part of the route template with a fixed, low-cardinality value. This includes literal strings like `/users/` and placeholders that are constrained to a finite, predefined set of values, e.g. `{controller}` or `{action}`. A dynamic path segment is a placeholder for a value that can have high cardinality and is not constrained to a predefined list like static path segments. Instrumentations SHOULD use routing information provided by the corresponding web framework. They SHOULD pick the most precise source of routing information and MAY support custom route formatting. Instrumentations SHOULD document the format and the API used to obtain the route string.
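A usage sketch for an HTTP server span (the route and values are illustrative):

    from opentelemetry import trace
    from opentelemetry.semconv.attributes.http_attributes import (
        HTTP_REQUEST_METHOD,
        HTTP_RESPONSE_STATUS_CODE,
        HTTP_ROUTE,
    )

    tracer = trace.get_tracer("http-attributes-example")
    with tracer.start_as_current_span("GET /users/{id}") as span:
        span.set_attribute(HTTP_REQUEST_METHOD, "GET")
        span.set_attribute(HTTP_ROUTE, "/users/{id}")
        span.set_attribute(HTTP_RESPONSE_STATUS_CODE, 200)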
""" class HttpRequestMethodValues(Enum): CONNECT = "CONNECT" """CONNECT method.""" DELETE = "DELETE" """DELETE method.""" GET = "GET" """GET method.""" HEAD = "HEAD" """HEAD method.""" OPTIONS = "OPTIONS" """OPTIONS method.""" PATCH = "PATCH" """PATCH method.""" POST = "POST" """POST method.""" PUT = "PUT" """PUT method.""" TRACE = "TRACE" """TRACE method.""" OTHER = "_OTHER" """Any HTTP method that the instrumentation has no prior knowledge of.""" network_attributes.py000066400000000000000000000052571511654350100406470ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final NETWORK_LOCAL_ADDRESS: Final = "network.local.address" """ Local address of the network connection - IP address or Unix domain socket name. """ NETWORK_LOCAL_PORT: Final = "network.local.port" """ Local port number of the network connection. """ NETWORK_PEER_ADDRESS: Final = "network.peer.address" """ Peer address of the network connection - IP address or Unix domain socket name. """ NETWORK_PEER_PORT: Final = "network.peer.port" """ Peer port number of the network connection. """ NETWORK_PROTOCOL_NAME: Final = "network.protocol.name" """ [OSI application layer](https://wikipedia.org/wiki/Application_layer) or non-OSI equivalent. Note: The value SHOULD be normalized to lowercase. """ NETWORK_PROTOCOL_VERSION: Final = "network.protocol.version" """ The actual version of the protocol used for network communication. Note: If protocol version is subject to negotiation (for example using [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute SHOULD be set to the negotiated version. If the actual protocol version is not known, this attribute SHOULD NOT be set. """ NETWORK_TRANSPORT: Final = "network.transport" """ [OSI transport layer](https://wikipedia.org/wiki/Transport_layer) or [inter-process communication method](https://wikipedia.org/wiki/Inter-process_communication). Note: The value SHOULD be normalized to lowercase. Consider always setting the transport when setting a port number, since a port number is ambiguous without knowing the transport. For example different processes could be listening on TCP port 12345 and UDP port 12345. """ NETWORK_TYPE: Final = "network.type" """ [OSI network layer](https://wikipedia.org/wiki/Network_layer) or non-OSI equivalent. Note: The value SHOULD be normalized to lowercase. 
""" class NetworkTransportValues(Enum): TCP = "tcp" """TCP.""" UDP = "udp" """UDP.""" PIPE = "pipe" """Named or anonymous pipe.""" UNIX = "unix" """Unix domain socket.""" QUIC = "quic" """QUIC.""" class NetworkTypeValues(Enum): IPV4 = "ipv4" """IPv4.""" IPV6 = "ipv6" """IPv6.""" otel_attributes.py000066400000000000000000000025771511654350100401230ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final OTEL_SCOPE_NAME: Final = "otel.scope.name" """ The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). """ OTEL_SCOPE_VERSION: Final = "otel.scope.version" """ The version of the instrumentation scope - (`InstrumentationScope.Version` in OTLP). """ OTEL_STATUS_CODE: Final = "otel.status_code" """ Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is UNSET. """ OTEL_STATUS_DESCRIPTION: Final = "otel.status_description" """ Description of the Status if it has a value, otherwise not set. """ class OtelStatusCodeValues(Enum): OK = "OK" """The operation has been validated by an Application developer or Operator to have completed successfully.""" ERROR = "ERROR" """The operation contains an error.""" server_attributes.py000066400000000000000000000023401511654350100404520ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final SERVER_ADDRESS: Final = "server.address" """ Server domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. Note: When observed from the client side, and when communicating through an intermediary, `server.address` SHOULD represent the server address behind any intermediaries, for example proxies, if it's available. """ SERVER_PORT: Final = "server.port" """ Server port number. Note: When observed from the client side, and when communicating through an intermediary, `server.port` SHOULD represent the server port behind any intermediaries, for example proxies, if it's available. 
""" service_attributes.py000066400000000000000000000022201511654350100406010ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final SERVICE_NAME: Final = "service.name" """ Logical name of the service. Note: MUST be the same for all instances of horizontally scaled services. If the value was not specified, SDKs MUST fallback to `unknown_service:` concatenated with [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If `process.executable.name` is not available, the value MUST be set to `unknown_service`. """ SERVICE_VERSION: Final = "service.version" """ The version string of the service API or implementation. The format is not defined by these conventions. """ telemetry_attributes.py000066400000000000000000000036121511654350100411610ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum from typing import Final TELEMETRY_SDK_LANGUAGE: Final = "telemetry.sdk.language" """ The language of the telemetry SDK. """ TELEMETRY_SDK_NAME: Final = "telemetry.sdk.name" """ The name of the telemetry SDK as defined above. Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to `opentelemetry`. If another SDK, like a fork or a vendor-provided implementation, is used, this SDK MUST set the `telemetry.sdk.name` attribute to the fully-qualified class or module name of this SDK's main entry point or another suitable identifier depending on the language. The identifier `opentelemetry` is reserved and MUST NOT be used in this case. All custom identifiers SHOULD be stable across different versions of an implementation. """ TELEMETRY_SDK_VERSION: Final = "telemetry.sdk.version" """ The version string of the telemetry SDK. 
""" class TelemetrySdkLanguageValues(Enum): CPP = "cpp" """cpp.""" DOTNET = "dotnet" """dotnet.""" ERLANG = "erlang" """erlang.""" GO = "go" """go.""" JAVA = "java" """java.""" NODEJS = "nodejs" """nodejs.""" PHP = "php" """php.""" PYTHON = "python" """python.""" RUBY = "ruby" """ruby.""" RUST = "rust" """rust.""" SWIFT = "swift" """swift.""" WEBJS = "webjs" """webjs.""" url_attributes.py000066400000000000000000000072061511654350100377540ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final URL_FRAGMENT: Final = "url.fragment" """ The [URI fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component. """ URL_FULL: Final = "url.full" """ Absolute URL describing a network resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986). Note: For network calls, URL usually has `scheme://host[:port][path][?query][#fragment]` format, where the fragment is not transmitted over HTTP, but if it is known, it SHOULD be included nevertheless. `url.full` MUST NOT contain credentials passed via URL in form of `https://username:password@www.example.com/`. In such case username and password SHOULD be redacted and attribute's value SHOULD be `https://REDACTED:REDACTED@www.example.com/`. `url.full` SHOULD capture the absolute URL when it is available (or can be reconstructed). Sensitive content provided in `url.full` SHOULD be scrubbed when instrumentations can identify it. ![Development](https://img.shields.io/badge/-development-blue) Query string values for the following keys SHOULD be redacted by default and replaced by the value `REDACTED`: * [`AWSAccessKeyId`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) * [`Signature`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) * [`sig`](https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token) * [`X-Goog-Signature`](https://cloud.google.com/storage/docs/access-control/signed-urls) This list is subject to change over time. When a query string value is redacted, the query string key SHOULD still be preserved, e.g. `https://www.example.com/path?color=blue&sig=REDACTED`. """ URL_PATH: Final = "url.path" """ The [URI path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component. Note: Sensitive content provided in `url.path` SHOULD be scrubbed when instrumentations can identify it. """ URL_QUERY: Final = "url.query" """ The [URI query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component. Note: Sensitive content provided in `url.query` SHOULD be scrubbed when instrumentations can identify it. 
![Development](https://img.shields.io/badge/-development-blue) Query string values for the following keys SHOULD be redacted by default and replaced by the value `REDACTED`: * [`AWSAccessKeyId`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) * [`Signature`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) * [`sig`](https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token) * [`X-Goog-Signature`](https://cloud.google.com/storage/docs/access-control/signed-urls) This list is subject to change over time. When a query string value is redacted, the query string key SHOULD still be preserved, e.g. `q=OpenTelemetry&sig=REDACTED`. """ URL_SCHEME: Final = "url.scheme" """ The [URI scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component identifying the used protocol. """ user_agent_attributes.py000066400000000000000000000014261511654350100413040ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final USER_AGENT_ORIGINAL: Final = "user_agent.original" """ Value of the [HTTP User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. """ python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/metrics/000077500000000000000000000000001511654350100336645ustar00rootroot00000000000000__init__.py000066400000000000000000000132601511654350100357200ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing_extensions import deprecated @deprecated( "Use metrics defined in the :py:const:`opentelemetry.semconv.metrics` and :py:const:`opentelemetry.semconv._incubating.metrics` modules instead. Deprecated since version 1.25.0.", ) class MetricInstruments: SCHEMA_URL = "https://opentelemetry.io/schemas/1.21.0" """ The URL of the OpenTelemetry schema for these keys and values. 
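Referring back to the `url.full`/`url.query` redaction guidance above, a minimal sketch of a query-string sanitizer (the key list mirrors that note; this is not a complete implementation):

    import re

    _REDACT_KEYS = ("AWSAccessKeyId", "Signature", "sig", "X-Goog-Signature")
    _REDACT_RE = re.compile(
        r"(^|&)(" + "|".join(map(re.escape, _REDACT_KEYS)) + r")=[^&]*"
    )

    def redact_query(query: str) -> str:
        # Keep the key, replace only its value.
        return _REDACT_RE.sub(r"\1\2=REDACTED", query)

    redact_query("q=OpenTelemetry&sig=secret")  # -> "q=OpenTelemetry&sig=REDACTED"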
""" HTTP_SERVER_DURATION = "http.server.duration" """ Measures the duration of inbound HTTP requests Instrument: histogram Unit: s """ HTTP_SERVER_ACTIVE_REQUESTS = "http.server.active_requests" """ Measures the number of concurrent HTTP requests that are currently in-flight Instrument: updowncounter Unit: {request} """ HTTP_SERVER_REQUEST_SIZE = "http.server.request.size" """ Measures the size of HTTP request messages (compressed) Instrument: histogram Unit: By """ HTTP_SERVER_RESPONSE_SIZE = "http.server.response.size" """ Measures the size of HTTP response messages (compressed) Instrument: histogram Unit: By """ HTTP_CLIENT_DURATION = "http.client.duration" """ Measures the duration of outbound HTTP requests Instrument: histogram Unit: s """ HTTP_CLIENT_REQUEST_SIZE = "http.client.request.size" """ Measures the size of HTTP request messages (compressed) Instrument: histogram Unit: By """ HTTP_CLIENT_RESPONSE_SIZE = "http.client.response.size" """ Measures the size of HTTP response messages (compressed) Instrument: histogram Unit: By """ PROCESS_RUNTIME_JVM_MEMORY_INIT = "process.runtime.jvm.memory.init" """ Measure of initial memory requested Instrument: updowncounter Unit: By """ PROCESS_RUNTIME_JVM_SYSTEM_CPU_UTILIZATION = ( "process.runtime.jvm.system.cpu.utilization" ) """ Recent CPU utilization for the whole system as reported by the JVM Instrument: gauge Unit: 1 """ PROCESS_RUNTIME_JVM_SYSTEM_CPU_LOAD_1M = ( "process.runtime.jvm.system.cpu.load_1m" ) """ Average CPU load of the whole system for the last minute as reported by the JVM Instrument: gauge Unit: 1 """ PROCESS_RUNTIME_JVM_BUFFER_USAGE = "process.runtime.jvm.buffer.usage" """ Measure of memory used by buffers Instrument: updowncounter Unit: By """ PROCESS_RUNTIME_JVM_BUFFER_LIMIT = "process.runtime.jvm.buffer.limit" """ Measure of total memory capacity of buffers Instrument: updowncounter Unit: By """ PROCESS_RUNTIME_JVM_BUFFER_COUNT = "process.runtime.jvm.buffer.count" """ Number of buffers in the pool Instrument: updowncounter Unit: {buffer} """ PROCESS_RUNTIME_JVM_MEMORY_USAGE = "process.runtime.jvm.memory.usage" """ Measure of memory used Instrument: updowncounter Unit: By """ PROCESS_RUNTIME_JVM_MEMORY_COMMITTED = ( "process.runtime.jvm.memory.committed" ) """ Measure of memory committed Instrument: updowncounter Unit: By """ PROCESS_RUNTIME_JVM_MEMORY_LIMIT = "process.runtime.jvm.memory.limit" """ Measure of max obtainable memory Instrument: updowncounter Unit: By """ PROCESS_RUNTIME_JVM_MEMORY_USAGE_AFTER_LAST_GC = ( "process.runtime.jvm.memory.usage_after_last_gc" ) """ Measure of memory used, as measured after the most recent garbage collection event on this pool Instrument: updowncounter Unit: By """ PROCESS_RUNTIME_JVM_GC_DURATION = "process.runtime.jvm.gc.duration" """ Duration of JVM garbage collection actions Instrument: histogram Unit: s """ PROCESS_RUNTIME_JVM_THREADS_COUNT = "process.runtime.jvm.threads.count" """ Number of executing platform threads Instrument: updowncounter Unit: {thread} """ PROCESS_RUNTIME_JVM_CLASSES_LOADED = "process.runtime.jvm.classes.loaded" """ Number of classes loaded since JVM start Instrument: counter Unit: {class} """ PROCESS_RUNTIME_JVM_CLASSES_UNLOADED = ( "process.runtime.jvm.classes.unloaded" ) """ Number of classes unloaded since JVM start Instrument: counter Unit: {class} """ PROCESS_RUNTIME_JVM_CLASSES_CURRENT_LOADED = ( "process.runtime.jvm.classes.current_loaded" ) """ Number of classes currently loaded Instrument: updowncounter Unit: {class} """ 
PROCESS_RUNTIME_JVM_CPU_TIME = "process.runtime.jvm.cpu.time" """ CPU time used by the process as reported by the JVM Instrument: counter Unit: s """ PROCESS_RUNTIME_JVM_CPU_RECENT_UTILIZATION = ( "process.runtime.jvm.cpu.recent_utilization" ) """ Recent CPU utilization for the process as reported by the JVM Instrument: gauge Unit: 1 """ # Manually defined metrics DB_CLIENT_CONNECTIONS_USAGE = "db.client.connections.usage" """ The number of connections that are currently in state described by the `state` attribute Instrument: UpDownCounter Unit: {connection} """ db_metrics.py000066400000000000000000000014671511654350100363020ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final DB_CLIENT_OPERATION_DURATION: Final = "db.client.operation.duration" """ Duration of database client operations Instrument: histogram Unit: s Note: Batch operations SHOULD be recorded as a single operation. """ http_metrics.py000066400000000000000000000015761511654350100366750ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/metrics# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final HTTP_CLIENT_REQUEST_DURATION: Final = "http.client.request.duration" """ Duration of HTTP client requests Instrument: histogram Unit: s """ HTTP_SERVER_REQUEST_DURATION: Final = "http.server.request.duration" """ Duration of HTTP server requests Instrument: histogram Unit: s """ python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/py.typed000066400000000000000000000000001511654350100337030ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/resource/000077500000000000000000000000001511654350100340455ustar00rootroot00000000000000__init__.py000066400000000000000000001003221511654350100360750ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/resource# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=too-many-lines from enum import Enum from typing_extensions import deprecated @deprecated( "Use attributes defined in the :py:const:`opentelemetry.semconv.attributes` and :py:const:`opentelemetry.semconv._incubating.attributes` modules instead. Deprecated since version 1.25.0.", ) class ResourceAttributes: SCHEMA_URL = "https://opentelemetry.io/schemas/1.21.0" """ The URL of the OpenTelemetry schema for these keys and values. """ BROWSER_BRANDS = "browser.brands" """ Array of brand name and version separated by a space. Note: This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.brands`). """ BROWSER_PLATFORM = "browser.platform" """ The platform on which the browser is running. Note: This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.platform`). If unavailable, the legacy `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD be left unset in order for the values to be consistent. The list of possible values is defined in the [W3C User-Agent Client Hints specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). Note that some (but not all) of these values can overlap with values in the [`os.type` and `os.name` attributes](./os.md). However, for consistency, the values in the `browser.platform` attribute should capture the exact value that the user agent provides. """ BROWSER_MOBILE = "browser.mobile" """ A boolean that is true if the browser is running on a mobile device. Note: This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be left unset. """ BROWSER_LANGUAGE = "browser.language" """ Preferred language of the user using the browser. Note: This value is intended to be taken from the Navigator API `navigator.language`. """ USER_AGENT_ORIGINAL = "user_agent.original" """ Full user-agent string provided by the browser. Note: The user-agent value SHOULD be provided only from browsers that do not have a mechanism to retrieve brands and platform individually from the User-Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` API can be used. """ CLOUD_PROVIDER = "cloud.provider" """ Name of the cloud provider. """ CLOUD_ACCOUNT_ID = "cloud.account.id" """ The cloud account ID the resource is assigned to. """ CLOUD_REGION = "cloud.region" """ The geographical region the resource is running. Note: Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), [Google Cloud regions](https://cloud.google.com/about/locations), or [Tencent Cloud regions](https://www.tencentcloud.com/document/product/213/6091). 
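Per the deprecation notice on this class, a migration sketch (the incubating module path follows this package's layout and is an assumption here):

    # Deprecated access:
    #     ResourceAttributes.CLOUD_REGION
    # Preferred access:
    from opentelemetry.semconv._incubating.attributes.cloud_attributes import (
        CLOUD_REGION,
    )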
""" CLOUD_RESOURCE_ID = "cloud.resource_id" """ Cloud provider-specific native identifier of the monitored cloud resource (e.g. an [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) on AWS, a [fully qualified resource ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) on Azure, a [full resource name](https://cloud.google.com/apis/design/resource_names#full_resource_name) on GCP). Note: On some cloud providers, it may not be possible to determine the full ID at startup, so it may be necessary to set `cloud.resource_id` as a span attribute instead. The exact value to use for `cloud.resource_id` depends on the cloud provider. The following well-known definitions MUST be used if you set this attribute and they apply: * **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). Take care not to use the "invoked ARN" directly but replace any [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) with the resolved function version, as the same runtime instance may be invokable with multiple different aliases. * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) of the invoked function, *not* the function app, having the form `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share a TracerProvider. """ CLOUD_AVAILABILITY_ZONE = "cloud.availability_zone" """ Cloud regions often have multiple, isolated locations known as zones to increase availability. Availability zone represents the zone where the resource is running. Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. """ CLOUD_PLATFORM = "cloud.platform" """ The cloud platform in use. Note: The prefix of the service SHOULD match the one specified in `cloud.provider`. """ AWS_ECS_CONTAINER_ARN = "aws.ecs.container.arn" """ The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). """ AWS_ECS_CLUSTER_ARN = "aws.ecs.cluster.arn" """ The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). """ AWS_ECS_LAUNCHTYPE = "aws.ecs.launchtype" """ The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task. """ AWS_ECS_TASK_ARN = "aws.ecs.task.arn" """ The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). """ AWS_ECS_TASK_FAMILY = "aws.ecs.task.family" """ The task definition family this task definition is a member of. """ AWS_ECS_TASK_REVISION = "aws.ecs.task.revision" """ The revision for this task definition. """ AWS_EKS_CLUSTER_ARN = "aws.eks.cluster.arn" """ The ARN of an EKS cluster. """ AWS_LOG_GROUP_NAMES = "aws.log.group.names" """ The name(s) of the AWS log group(s) an application is writing to. Note: Multiple log groups must be supported for cases like multi-container applications, where a single application has sidecar containers, and each write to their own log group. """ AWS_LOG_GROUP_ARNS = "aws.log.group.arns" """ The Amazon Resource Name(s) (ARN) of the AWS log group(s). 
Note: See the [log group ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). """ AWS_LOG_STREAM_NAMES = "aws.log.stream.names" """ The name(s) of the AWS log stream(s) an application is writing to. """ AWS_LOG_STREAM_ARNS = "aws.log.stream.arns" """ The ARN(s) of the AWS log stream(s). Note: See the [log stream ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain several log streams, so these ARNs necessarily identify both a log group and a log stream. """ GCP_CLOUD_RUN_JOB_EXECUTION = "gcp.cloud_run.job.execution" """ The name of the Cloud Run [execution](https://cloud.google.com/run/docs/managing/job-executions) being run for the Job, as set by the [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) environment variable. """ GCP_CLOUD_RUN_JOB_TASK_INDEX = "gcp.cloud_run.job.task_index" """ The index for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) environment variable. """ GCP_GCE_INSTANCE_NAME = "gcp.gce.instance.name" """ The instance name of a GCE instance. This is the value provided by `host.name`, the visible name of the instance in the Cloud Console UI, and the prefix for the default hostname of the instance as defined by the [default internal DNS name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). """ GCP_GCE_INSTANCE_HOSTNAME = "gcp.gce.instance.hostname" """ The hostname of a GCE instance. This is the full value of the default or [custom hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). """ HEROKU_RELEASE_CREATION_TIMESTAMP = "heroku.release.creation_timestamp" """ Time and date the release was created. """ HEROKU_RELEASE_COMMIT = "heroku.release.commit" """ Commit hash for the current release. """ HEROKU_APP_ID = "heroku.app.id" """ Unique identifier for the application. """ CONTAINER_NAME = "container.name" """ Container name used by container runtime. """ CONTAINER_ID = "container.id" """ Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/reference/run/#container-identification). The UUID might be abbreviated. """ CONTAINER_RUNTIME = "container.runtime" """ The container runtime managing this container. """ CONTAINER_IMAGE_NAME = "container.image.name" """ Name of the image the container was built on. """ CONTAINER_IMAGE_TAG = "container.image.tag" """ Container image tag. """ CONTAINER_IMAGE_ID = "container.image.id" """ Runtime specific image identifier. Usually a hash algorithm followed by a UUID. Note: Docker defines a sha256 of the image id; `container.image.id` corresponds to the `Image` field from the Docker container inspect [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) endpoint. K8s defines a link to the container registry repository with digest `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. OCI defines a digest of manifest. """ CONTAINER_COMMAND = "container.command" """ The command used to run the container (i.e. the command name). Note: If using embedded credentials or sensitive data, it is recommended to remove them to prevent potential leakage. 
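A small illustrative sketch of such scrubbing before the command is recorded
(the regular expression and the `_scrub_command` helper are hypothetical, not
part of this package):

    import re

    _SECRET_FLAG = re.compile(r"(--?(?:password|token|secret)[= ])\S+")

    def _scrub_command(command: str) -> str:
        # Replace values following credential-style flags with a marker.
        return _SECRET_FLAG.sub(r"\1REDACTED", command)

    # _scrub_command("myapp --password=hunter2") -> "myapp --password=REDACTED"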
""" CONTAINER_COMMAND_LINE = "container.command_line" """ The full command run by the container as a single string representing the full command. [2]. """ CONTAINER_COMMAND_ARGS = "container.command_args" """ All the command arguments (including the command/executable itself) run by the container. [2]. """ DEPLOYMENT_ENVIRONMENT = "deployment.environment" """ Name of the [deployment environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka deployment tier). """ DEVICE_ID = "device.id" """ A unique identifier representing the device. Note: The device identifier MUST only be defined using the values outlined below. This value is not an advertising identifier and MUST NOT be used as such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the Firebase Installation ID or a globally unique UUID which is persisted across sessions in your application. More information can be found [here](https://developer.android.com/training/articles/user-data-ids) on best practices and exact implementation details. Caution should be taken when storing personal data or anything which can identify a user. GDPR and data protection laws may apply, ensure you do your own due diligence. """ DEVICE_MODEL_IDENTIFIER = "device.model.identifier" """ The model identifier for the device. Note: It's recommended this value represents a machine readable version of the model identifier rather than the market or consumer-friendly name of the device. """ DEVICE_MODEL_NAME = "device.model.name" """ The marketing name for the device model. Note: It's recommended this value represents a human readable version of the device model rather than a machine readable alternative. """ DEVICE_MANUFACTURER = "device.manufacturer" """ The name of the device manufacturer. Note: The Android OS provides this field via [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). iOS apps SHOULD hardcode the value `Apple`. """ FAAS_NAME = "faas.name" """ The name of the single function that this runtime instance executes. Note: This is the name of the function as configured/deployed on the FaaS platform and is usually different from the name of the callback function (which may be stored in the [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes) span attributes). For some cloud providers, the above definition is ambiguous. The following definition of function name MUST be used for this attribute (and consequently the span name) for the listed cloud providers/products: * **Azure:** The full name `/`, i.e., function app name followed by a forward slash followed by the function name (this form can also be seen in the resource JSON for the function). This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share a TracerProvider (see also the `cloud.resource_id` attribute). """ FAAS_VERSION = "faas.version" """ The immutable version of the function being executed. Note: Depending on the cloud provider and platform, use: * **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) (an integer represented as a decimal string). * **Google Cloud Run (Services):** The [revision](https://cloud.google.com/run/docs/managing/revisions) (i.e., the function name plus the revision suffix). 
* **Google Cloud Functions:** The value of the [`K_REVISION` environment variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). * **Azure Functions:** Not applicable. Do not set this attribute. """ FAAS_INSTANCE = "faas.instance" """ The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version. Note: * **AWS Lambda:** Use the (full) log stream name. """ FAAS_MAX_MEMORY = "faas.max_memory" """ The amount of memory available to the serverless function converted to Bytes. Note: It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must be multiplied by 1,048,576). """ HOST_ID = "host.id" """ Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider. For non-containerized systems, this should be the `machine-id`. See the table below for the sources to use to determine the `machine-id` based on operating system. """ HOST_NAME = "host.name" """ Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user. """ HOST_TYPE = "host.type" """ Type of host. For Cloud, this must be the machine type. """ HOST_ARCH = "host.arch" """ The CPU architecture the host system is running on. """ HOST_IMAGE_NAME = "host.image.name" """ Name of the VM image or OS install the host was instantiated from. """ HOST_IMAGE_ID = "host.image.id" """ VM image ID or host OS image ID. For Cloud, this value is from the provider. """ HOST_IMAGE_VERSION = "host.image.version" """ The version string of the VM image or host OS as defined in [Version Attributes](README.md#version-attributes). """ K8S_CLUSTER_NAME = "k8s.cluster.name" """ The name of the cluster. """ K8S_CLUSTER_UID = "k8s.cluster.uid" """ A pseudo-ID for the cluster, set to the UID of the `kube-system` namespace. Note: K8s does not have support for obtaining a cluster ID. If this is ever added, we will recommend collecting the `k8s.cluster.uid` through the official APIs. In the meantime, we are able to use the `uid` of the `kube-system` namespace as a proxy for cluster ID. Read on for the rationale. Every object created in a K8s cluster is assigned a distinct UID. The `kube-system` namespace is used by Kubernetes itself and will exist for the lifetime of the cluster. Using the `uid` of the `kube-system` namespace is a reasonable proxy for the K8s ClusterID as it will only change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are UUIDs as standardized by [ISO/IEC 9834-8 and ITU-T X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). Which states: > If generated according to one of the mechanisms defined in Rec. ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be different from all other UUIDs generated before 3603 A.D., or is extremely likely to be different (depending on the mechanism chosen). Therefore, UIDs between clusters should be extremely unlikely to conflict. """ K8S_NODE_NAME = "k8s.node.name" """ The name of the Node. """ K8S_NODE_UID = "k8s.node.uid" """ The UID of the Node. """ K8S_NAMESPACE_NAME = "k8s.namespace.name" """ The name of the namespace that the pod is running in. """ K8S_POD_UID = "k8s.pod.uid" """ The UID of the Pod. """ K8S_POD_NAME = "k8s.pod.name" """ The name of the Pod. 
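As an illustration, pod attributes like these are commonly populated from
environment variables injected through the Kubernetes Downward API (the
`K8S_POD_NAME` and `K8S_NAMESPACE` variable names below are
deployment-specific assumptions, not part of this convention):

    import os

    k8s_attributes = {
        ResourceAttributes.K8S_POD_NAME: os.environ.get("K8S_POD_NAME", ""),
        ResourceAttributes.K8S_NAMESPACE_NAME: os.environ.get(
            "K8S_NAMESPACE", ""
        ),
    }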
""" K8S_CONTAINER_NAME = "k8s.container.name" """ The name of the Container from Pod specification, must be unique within a Pod. Container runtime usually uses different globally unique name (`container.name`). """ K8S_CONTAINER_RESTART_COUNT = "k8s.container.restart_count" """ Number of times the container was restarted. This attribute can be used to identify a particular container (running or stopped) within a container spec. """ K8S_REPLICASET_UID = "k8s.replicaset.uid" """ The UID of the ReplicaSet. """ K8S_REPLICASET_NAME = "k8s.replicaset.name" """ The name of the ReplicaSet. """ K8S_DEPLOYMENT_UID = "k8s.deployment.uid" """ The UID of the Deployment. """ K8S_DEPLOYMENT_NAME = "k8s.deployment.name" """ The name of the Deployment. """ K8S_STATEFULSET_UID = "k8s.statefulset.uid" """ The UID of the StatefulSet. """ K8S_STATEFULSET_NAME = "k8s.statefulset.name" """ The name of the StatefulSet. """ K8S_DAEMONSET_UID = "k8s.daemonset.uid" """ The UID of the DaemonSet. """ K8S_DAEMONSET_NAME = "k8s.daemonset.name" """ The name of the DaemonSet. """ K8S_JOB_UID = "k8s.job.uid" """ The UID of the Job. """ K8S_JOB_NAME = "k8s.job.name" """ The name of the Job. """ K8S_CRONJOB_UID = "k8s.cronjob.uid" """ The UID of the CronJob. """ K8S_CRONJOB_NAME = "k8s.cronjob.name" """ The name of the CronJob. """ OS_TYPE = "os.type" """ The operating system type. """ OS_DESCRIPTION = "os.description" """ Human readable (not intended to be parsed) OS version information, like e.g. reported by `ver` or `lsb_release -a` commands. """ OS_NAME = "os.name" """ Human readable operating system name. """ OS_VERSION = "os.version" """ The version string of the operating system as defined in [Version Attributes](/docs/resource/README.md#version-attributes). """ PROCESS_PID = "process.pid" """ Process identifier (PID). """ PROCESS_PARENT_PID = "process.parent_pid" """ Parent Process identifier (PID). """ PROCESS_EXECUTABLE_NAME = "process.executable.name" """ The name of the process executable. On Linux based systems, can be set to the `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of `GetProcessImageFileNameW`. """ PROCESS_EXECUTABLE_PATH = "process.executable.path" """ The full path to the process executable. On Linux based systems, can be set to the target of `proc/[pid]/exe`. On Windows, can be set to the result of `GetProcessImageFileNameW`. """ PROCESS_COMMAND = "process.command" """ The command used to launch the process (i.e. the command name). On Linux based systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter extracted from `GetCommandLineW`. """ PROCESS_COMMAND_LINE = "process.command_line" """ The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead. """ PROCESS_COMMAND_ARGS = "process.command_args" """ All the command arguments (including the command/executable itself) as received by the process. On Linux-based systems (and some other Unixoid systems supporting procfs), can be set according to the list of null-delimited strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be the full argv vector passed to `main`. """ PROCESS_OWNER = "process.owner" """ The username of the user that owns the process. """ PROCESS_RUNTIME_NAME = "process.runtime.name" """ The name of the runtime of this process. 
For compiled native binaries, this SHOULD be the name of the compiler. """ PROCESS_RUNTIME_VERSION = "process.runtime.version" """ The version of the runtime of this process, as returned by the runtime without modification. """ PROCESS_RUNTIME_DESCRIPTION = "process.runtime.description" """ An additional description about the runtime of the process, for example a specific vendor customization of the runtime environment. """ SERVICE_NAME = "service.name" """ Logical name of the service. Note: MUST be the same for all instances of horizontally scaled services. If the value was not specified, SDKs MUST fallback to `unknown_service:` concatenated with [`process.executable.name`](process.md#process), e.g. `unknown_service:bash`. If `process.executable.name` is not available, the value MUST be set to `unknown_service`. """ SERVICE_VERSION = "service.version" """ The version string of the service API or implementation. The format is not defined by these conventions. """ SERVICE_NAMESPACE = "service.namespace" """ A namespace for `service.name`. Note: A string value having a meaning that helps to distinguish a group of services, for example the team name that owns a group of services. `service.name` is expected to be unique within the same namespace. If `service.namespace` is not specified in the Resource then `service.name` is expected to be unique for all services that have no explicit namespace defined (so the empty/unspecified namespace is simply one more valid namespace). Zero-length namespace string is assumed equal to unspecified namespace. """ SERVICE_INSTANCE_ID = "service.instance.id" """ The string ID of the service instance. Note: MUST be unique for each instance of the same `service.namespace,service.name` pair (in other words `service.namespace,service.name,service.instance.id` triplet MUST be globally unique). The ID helps to distinguish instances of the same service that exist at the same time (e.g. instances of a horizontally scaled service). It is preferable for the ID to be persistent and stay the same for the lifetime of the service instance, however it is acceptable that the ID is ephemeral and changes during important lifetime events for the service (e.g. service restarts). If the service has no inherent unique ID that can be used as the value of this attribute it is recommended to generate a random Version 1 or Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use Version 5, see RFC 4122 for more recommendations). """ TELEMETRY_SDK_NAME = "telemetry.sdk.name" """ The name of the telemetry SDK as defined above. Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to `opentelemetry`. If another SDK, like a fork or a vendor-provided implementation, is used, this SDK MUST set the `telemetry.sdk.name` attribute to the fully-qualified class or module name of this SDK's main entry point or another suitable identifier depending on the language. The identifier `opentelemetry` is reserved and MUST NOT be used in this case. All custom identifiers SHOULD be stable across different versions of an implementation. """ TELEMETRY_SDK_LANGUAGE = "telemetry.sdk.language" """ The language of the telemetry SDK. """ TELEMETRY_SDK_VERSION = "telemetry.sdk.version" """ The version string of the telemetry SDK. """ TELEMETRY_AUTO_VERSION = "telemetry.auto.version" """ The version string of the auto instrumentation agent, if used. """ WEBENGINE_NAME = "webengine.name" """ The name of the web engine. 
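Tying together the `service.name` and `service.instance.id` guidance above, a
hedged sketch of a resource with a random Version 4 UUID instance id (assumes
the `opentelemetry-sdk` package; the service name is a placeholder):

    import uuid

    from opentelemetry.sdk.resources import Resource

    resource = Resource.create(
        {
            ResourceAttributes.SERVICE_NAME: "checkout",
            ResourceAttributes.SERVICE_INSTANCE_ID: str(uuid.uuid4()),
        }
    )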
""" WEBENGINE_VERSION = "webengine.version" """ The version of the web engine. """ WEBENGINE_DESCRIPTION = "webengine.description" """ Additional description of the web engine (e.g. detailed version and edition information). """ OTEL_SCOPE_NAME = "otel.scope.name" """ The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). """ OTEL_SCOPE_VERSION = "otel.scope.version" """ The version of the instrumentation scope - (`InstrumentationScope.Version` in OTLP). """ OTEL_LIBRARY_NAME = "otel.library.name" """ Deprecated, use the `otel.scope.name` attribute. """ OTEL_LIBRARY_VERSION = "otel.library.version" """ Deprecated, use the `otel.scope.version` attribute. """ # Manually defined deprecated attributes FAAS_ID = "faas.id" """ Deprecated, use the `cloud.resource.id` attribute. """ @deprecated( "Use :py:const:`opentelemetry.semconv._incubating.attributes.CloudProviderValues` instead. Deprecated since version 1.25.0.", ) class CloudProviderValues(Enum): ALIBABA_CLOUD = "alibaba_cloud" """Alibaba Cloud.""" AWS = "aws" """Amazon Web Services.""" AZURE = "azure" """Microsoft Azure.""" GCP = "gcp" """Google Cloud Platform.""" HEROKU = "heroku" """Heroku Platform as a Service.""" IBM_CLOUD = "ibm_cloud" """IBM Cloud.""" TENCENT_CLOUD = "tencent_cloud" """Tencent Cloud.""" @deprecated( "Use :py:const:`opentelemetry.semconv._incubating.attributes.CloudPlatformValues` instead. Deprecated since version 1.25.0.", ) class CloudPlatformValues(Enum): ALIBABA_CLOUD_ECS = "alibaba_cloud_ecs" """Alibaba Cloud Elastic Compute Service.""" ALIBABA_CLOUD_FC = "alibaba_cloud_fc" """Alibaba Cloud Function Compute.""" ALIBABA_CLOUD_OPENSHIFT = "alibaba_cloud_openshift" """Red Hat OpenShift on Alibaba Cloud.""" AWS_EC2 = "aws_ec2" """AWS Elastic Compute Cloud.""" AWS_ECS = "aws_ecs" """AWS Elastic Container Service.""" AWS_EKS = "aws_eks" """AWS Elastic Kubernetes Service.""" AWS_LAMBDA = "aws_lambda" """AWS Lambda.""" AWS_ELASTIC_BEANSTALK = "aws_elastic_beanstalk" """AWS Elastic Beanstalk.""" AWS_APP_RUNNER = "aws_app_runner" """AWS App Runner.""" AWS_OPENSHIFT = "aws_openshift" """Red Hat OpenShift on AWS (ROSA).""" AZURE_VM = "azure_vm" """Azure Virtual Machines.""" AZURE_CONTAINER_INSTANCES = "azure_container_instances" """Azure Container Instances.""" AZURE_AKS = "azure_aks" """Azure Kubernetes Service.""" AZURE_FUNCTIONS = "azure_functions" """Azure Functions.""" AZURE_APP_SERVICE = "azure_app_service" """Azure App Service.""" AZURE_OPENSHIFT = "azure_openshift" """Azure Red Hat OpenShift.""" GCP_BARE_METAL_SOLUTION = "gcp_bare_metal_solution" """Google Bare Metal Solution (BMS).""" GCP_COMPUTE_ENGINE = "gcp_compute_engine" """Google Cloud Compute Engine (GCE).""" GCP_CLOUD_RUN = "gcp_cloud_run" """Google Cloud Run.""" GCP_KUBERNETES_ENGINE = "gcp_kubernetes_engine" """Google Cloud Kubernetes Engine (GKE).""" GCP_CLOUD_FUNCTIONS = "gcp_cloud_functions" """Google Cloud Functions (GCF).""" GCP_APP_ENGINE = "gcp_app_engine" """Google Cloud App Engine (GAE).""" GCP_OPENSHIFT = "gcp_openshift" """Red Hat OpenShift on Google Cloud.""" IBM_CLOUD_OPENSHIFT = "ibm_cloud_openshift" """Red Hat OpenShift on IBM Cloud.""" TENCENT_CLOUD_CVM = "tencent_cloud_cvm" """Tencent Cloud Cloud Virtual Machine (CVM).""" TENCENT_CLOUD_EKS = "tencent_cloud_eks" """Tencent Cloud Elastic Kubernetes Service (EKS).""" TENCENT_CLOUD_SCF = "tencent_cloud_scf" """Tencent Cloud Serverless Cloud Function (SCF).""" @deprecated( "Use :py:const:`opentelemetry.semconv._incubating.attributes.AwsEcsLaunchtypeValues` instead. 
Deprecated since version 1.25.0.", ) class AwsEcsLaunchtypeValues(Enum): EC2 = "ec2" """ec2.""" FARGATE = "fargate" """fargate.""" @deprecated( "Use :py:const:`opentelemetry.semconv._incubating.attributes.HostArchValues` instead. Deprecated since version 1.25.0.", ) class HostArchValues(Enum): AMD64 = "amd64" """AMD64.""" ARM32 = "arm32" """ARM32.""" ARM64 = "arm64" """ARM64.""" IA64 = "ia64" """Itanium.""" PPC32 = "ppc32" """32-bit PowerPC.""" PPC64 = "ppc64" """64-bit PowerPC.""" S390X = "s390x" """IBM z/Architecture.""" X86 = "x86" """32-bit x86.""" @deprecated( "Use :py:const:`opentelemetry.semconv._incubating.attributes.OsTypeValues` instead. Deprecated since version 1.25.0.", ) class OsTypeValues(Enum): WINDOWS = "windows" """Microsoft Windows.""" LINUX = "linux" """Linux.""" DARWIN = "darwin" """Apple Darwin.""" FREEBSD = "freebsd" """FreeBSD.""" NETBSD = "netbsd" """NetBSD.""" OPENBSD = "openbsd" """OpenBSD.""" DRAGONFLYBSD = "dragonflybsd" """DragonFly BSD.""" HPUX = "hpux" """HP-UX (Hewlett Packard Unix).""" AIX = "aix" """AIX (Advanced Interactive eXecutive).""" SOLARIS = "solaris" """SunOS, Oracle Solaris.""" Z_OS = "z_os" """IBM z/OS.""" @deprecated( "Use :py:const:`opentelemetry.semconv.attributes.TelemetrySdkLanguageValues` instead. Deprecated since version 1.25.0.", ) class TelemetrySdkLanguageValues(Enum): CPP = "cpp" """cpp.""" DOTNET = "dotnet" """dotnet.""" ERLANG = "erlang" """erlang.""" GO = "go" """go.""" JAVA = "java" """java.""" NODEJS = "nodejs" """nodejs.""" PHP = "php" """php.""" PYTHON = "python" """python.""" RUBY = "ruby" """ruby.""" RUST = "rust" """rust.""" SWIFT = "swift" """swift.""" WEBJS = "webjs" """webjs.""" python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/schemas.py000066400000000000000000000051461511654350100342210ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum class Schemas(Enum): V1_21_0 = "https://opentelemetry.io/schemas/1.21.0" """ The URL of the OpenTelemetry schema version 1.21.0. """ V1_23_1 = "https://opentelemetry.io/schemas/1.23.1" """ The URL of the OpenTelemetry schema version 1.23.1. """ V1_25_0 = "https://opentelemetry.io/schemas/1.25.0" """ The URL of the OpenTelemetry schema version 1.25.0. """ V1_26_0 = "https://opentelemetry.io/schemas/1.26.0" """ The URL of the OpenTelemetry schema version 1.26.0. """ V1_27_0 = "https://opentelemetry.io/schemas/1.27.0" """ The URL of the OpenTelemetry schema version 1.27.0. """ V1_28_0 = "https://opentelemetry.io/schemas/1.28.0" """ The URL of the OpenTelemetry schema version 1.28.0. """ V1_29_0 = "https://opentelemetry.io/schemas/1.29.0" """ The URL of the OpenTelemetry schema version 1.29.0. """ V1_30_0 = "https://opentelemetry.io/schemas/1.30.0" """ The URL of the OpenTelemetry schema version 1.30.0. """ V1_31_0 = "https://opentelemetry.io/schemas/1.31.0" """ The URL of the OpenTelemetry schema version 1.31.0. 
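For example, an instrumentation can advertise the schema version its
attributes follow when acquiring a tracer (a sketch; assumes the
`opentelemetry-api` package, and the instrumentation name is a placeholder):

    from opentelemetry import trace
    from opentelemetry.semconv.schemas import Schemas

    tracer = trace.get_tracer(
        "my.instrumentation", schema_url=Schemas.V1_31_0.value
    )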
""" V1_32_0 = "https://opentelemetry.io/schemas/1.32.0" """ The URL of the OpenTelemetry schema version 1.32.0. """ V1_33_0 = "https://opentelemetry.io/schemas/1.33.0" """ The URL of the OpenTelemetry schema version 1.33.0. """ V1_34_0 = "https://opentelemetry.io/schemas/1.34.0" """ The URL of the OpenTelemetry schema version 1.34.0. """ V1_36_0 = "https://opentelemetry.io/schemas/1.36.0" """ The URL of the OpenTelemetry schema version 1.36.0. """ V1_37_0 = "https://opentelemetry.io/schemas/1.37.0" """ The URL of the OpenTelemetry schema version 1.37.0. """ V1_38_0 = "https://opentelemetry.io/schemas/1.38.0" """ The URL of the OpenTelemetry schema version 1.38.0. """ # when generating new semantic conventions, # make sure to add new versions version here. python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/trace/000077500000000000000000000000001511654350100333145ustar00rootroot00000000000000__init__.py000066400000000000000000002074161511654350100353600ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/trace# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=too-many-lines from enum import Enum from typing_extensions import deprecated @deprecated( "Use attributes defined in the :py:const:`opentelemetry.semconv.attributes` and :py:const:`opentelemetry.semconv._incubating.attributes` modules instead. Deprecated since version 1.25.0.", ) class SpanAttributes: SCHEMA_URL = "https://opentelemetry.io/schemas/1.21.0" """ The URL of the OpenTelemetry schema for these keys and values. """ CLIENT_ADDRESS = "client.address" """ Client address - unix domain socket name, IPv4 or IPv6 address. Note: When observed from the server side, and when communicating through an intermediary, `client.address` SHOULD represent client address behind any intermediaries (e.g. proxies) if it's available. """ CLIENT_PORT = "client.port" """ Client port number. Note: When observed from the server side, and when communicating through an intermediary, `client.port` SHOULD represent client port behind any intermediaries (e.g. proxies) if it's available. """ CLIENT_SOCKET_ADDRESS = "client.socket.address" """ Immediate client peer address - unix domain socket name, IPv4 or IPv6 address. """ CLIENT_SOCKET_PORT = "client.socket.port" """ Immediate client peer port number. """ HTTP_METHOD = "http.method" """ Deprecated, use `http.request.method` instead. """ HTTP_STATUS_CODE = "http.status_code" """ Deprecated, use `http.response.status_code` instead. """ HTTP_SCHEME = "http.scheme" """ Deprecated, use `url.scheme` instead. """ HTTP_URL = "http.url" """ Deprecated, use `url.full` instead. """ HTTP_TARGET = "http.target" """ Deprecated, use `url.path` and `url.query` instead. """ HTTP_REQUEST_CONTENT_LENGTH = "http.request_content_length" """ Deprecated, use `http.request.body.size` instead. 
""" HTTP_RESPONSE_CONTENT_LENGTH = "http.response_content_length" """ Deprecated, use `http.response.body.size` instead. """ NET_SOCK_PEER_NAME = "net.sock.peer.name" """ Deprecated, use `server.socket.domain` on client spans. """ NET_SOCK_PEER_ADDR = "net.sock.peer.addr" """ Deprecated, use `server.socket.address` on client spans and `client.socket.address` on server spans. """ NET_SOCK_PEER_PORT = "net.sock.peer.port" """ Deprecated, use `server.socket.port` on client spans and `client.socket.port` on server spans. """ NET_PEER_NAME = "net.peer.name" """ Deprecated, use `server.address` on client spans and `client.address` on server spans. """ NET_PEER_PORT = "net.peer.port" """ Deprecated, use `server.port` on client spans and `client.port` on server spans. """ NET_HOST_NAME = "net.host.name" """ Deprecated, use `server.address`. """ NET_HOST_PORT = "net.host.port" """ Deprecated, use `server.port`. """ NET_SOCK_HOST_ADDR = "net.sock.host.addr" """ Deprecated, use `server.socket.address`. """ NET_SOCK_HOST_PORT = "net.sock.host.port" """ Deprecated, use `server.socket.port`. """ NET_TRANSPORT = "net.transport" """ Deprecated, use `network.transport`. """ NET_PROTOCOL_NAME = "net.protocol.name" """ Deprecated, use `network.protocol.name`. """ NET_PROTOCOL_VERSION = "net.protocol.version" """ Deprecated, use `network.protocol.version`. """ NET_SOCK_FAMILY = "net.sock.family" """ Deprecated, use `network.transport` and `network.type`. """ DESTINATION_DOMAIN = "destination.domain" """ The domain name of the destination system. Note: This value may be a host name, a fully qualified domain name, or another host naming format. """ DESTINATION_ADDRESS = "destination.address" """ Peer address, for example IP address or UNIX socket name. """ DESTINATION_PORT = "destination.port" """ Peer port number. """ EXCEPTION_TYPE = "exception.type" """ The type of the exception (its fully-qualified class name, if applicable). The dynamic type of the exception should be preferred over the static type in languages that support it. """ EXCEPTION_MESSAGE = "exception.message" """ The exception message. """ EXCEPTION_STACKTRACE = "exception.stacktrace" """ A stacktrace as a string in the natural representation for the language runtime. The representation is to be determined and documented by each language SIG. """ HTTP_REQUEST_METHOD = "http.request.method" """ HTTP request method. Note: HTTP request method value SHOULD be "known" to the instrumentation. By default, this convention defines "known" methods as the ones listed in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) and the PATCH method defined in [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). If the HTTP request method is not known to instrumentation, it MUST set the `http.request.method` attribute to `_OTHER` and, except if reporting a metric, MUST set the exact method received in the request line as value of the `http.request.method_original` attribute. If the HTTP instrumentation could end up converting valid HTTP request methods to `_OTHER`, then it MUST provide a way to override the list of known HTTP methods. If this override is done via environment variable, then the environment variable MUST be named OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of case-sensitive known HTTP methods (this list MUST be a full override of the default known method, it is not a list of known methods in addition to the defaults). 
HTTP method names are case-sensitive and `http.request.method` attribute value MUST match a known HTTP method name exactly. Instrumentations for specific web frameworks that consider HTTP methods to be case insensitive, SHOULD populate a canonical equivalent. Tracing instrumentations that do so, MUST also set `http.request.method_original` to the original value. """ HTTP_RESPONSE_STATUS_CODE = "http.response.status_code" """ [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). """ NETWORK_PROTOCOL_NAME = "network.protocol.name" """ [OSI Application Layer](https://osi-model.com/application-layer/) or non-OSI equivalent. The value SHOULD be normalized to lowercase. """ NETWORK_PROTOCOL_VERSION = "network.protocol.version" """ Version of the application layer protocol used. See note below. Note: `network.protocol.version` refers to the version of the protocol used and might be different from the protocol client's version. If the HTTP client used has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should be set to `1.1`. """ SERVER_ADDRESS = "server.address" """ Host identifier of the ["URI origin"](https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin) HTTP request is sent to. Note: Determined by using the first of the following that applies - Host identifier of the [request target](https://www.rfc-editor.org/rfc/rfc9110.html#target.resource) if it's sent in absolute-form - Host identifier of the `Host` header SHOULD NOT be set if capturing it would require an extra DNS lookup. """ SERVER_PORT = "server.port" """ Port identifier of the ["URI origin"](https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin) HTTP request is sent to. Note: When [request target](https://www.rfc-editor.org/rfc/rfc9110.html#target.resource) is absolute URI, `server.port` MUST match URI port identifier, otherwise it MUST match `Host` header port identifier. """ HTTP_ROUTE = "http.route" """ The matched route (path template in the format used by the respective server framework). See note below. Note: MUST NOT be populated when this is not supported by the HTTP server framework as the route attribute should have low-cardinality and the URI path can NOT substitute it. SHOULD include the [application root](/docs/http/http-spans.md#http-server-definitions) if there is one. """ URL_SCHEME = "url.scheme" """ The [URI scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component identifying the used protocol. """ EVENT_NAME = "event.name" """ The name identifies the event. """ EVENT_DOMAIN = "event.domain" """ The domain identifies the business context for the events. Note: Events across different domains may have same `event.name`, yet be unrelated events. """ LOG_RECORD_UID = "log.record.uid" """ A unique identifier for the Log Record. Note: If an id is provided, other log records with the same id will be considered duplicates and can be removed safely. This means, that two distinguishable log records MUST have different values. The id MAY be an [Universally Unique Lexicographically Sortable Identifier (ULID)](https://github.com/ulid/spec), but other identifiers (e.g. UUID) may be used as needed. """ FEATURE_FLAG_KEY = "feature_flag.key" """ The unique identifier of the feature flag. """ FEATURE_FLAG_PROVIDER_NAME = "feature_flag.provider_name" """ The name of the service provider that performs the flag evaluation. """ FEATURE_FLAG_VARIANT = "feature_flag.variant" """ SHOULD be a semantic identifier for a value. 
If one is unavailable, a stringified version of the value can be used. Note: A semantic identifier, commonly referred to as a variant, provides a means for referring to a value without including the value itself. This can provide additional context for understanding the meaning behind a value. For example, the variant `red` maybe be used for the value `#c05543`. A stringified version of the value can be used in situations where a semantic identifier is unavailable. String representation of the value should be determined by the implementer. """ LOG_IOSTREAM = "log.iostream" """ The stream associated with the log. See below for a list of well-known values. """ LOG_FILE_NAME = "log.file.name" """ The basename of the file. """ LOG_FILE_PATH = "log.file.path" """ The full path to the file. """ LOG_FILE_NAME_RESOLVED = "log.file.name_resolved" """ The basename of the file, with symlinks resolved. """ LOG_FILE_PATH_RESOLVED = "log.file.path_resolved" """ The full path to the file, with symlinks resolved. """ SERVER_SOCKET_ADDRESS = "server.socket.address" """ Physical server IP address or Unix socket address. If set from the client, should simply use the socket's peer address, and not attempt to find any actual server IP (i.e., if set from client, this may represent some proxy server instead of the logical server). """ POOL = "pool" """ Name of the buffer pool. Note: Pool names are generally obtained via [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). """ TYPE = "type" """ The type of memory. """ SERVER_SOCKET_DOMAIN = "server.socket.domain" """ The domain name of an immediate peer. Note: Typically observed from the client side, and represents a proxy or other intermediary domain name. """ SERVER_SOCKET_PORT = "server.socket.port" """ Physical server port. """ SOURCE_DOMAIN = "source.domain" """ The domain name of the source system. Note: This value may be a host name, a fully qualified domain name, or another host naming format. """ SOURCE_ADDRESS = "source.address" """ Source address, for example IP address or Unix socket name. """ SOURCE_PORT = "source.port" """ Source port number. """ AWS_LAMBDA_INVOKED_ARN = "aws.lambda.invoked_arn" """ The full invoked ARN as provided on the `Context` passed to the function (`Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` applicable). Note: This may be different from `cloud.resource_id` if an alias is involved. """ CLOUDEVENTS_EVENT_ID = "cloudevents.event_id" """ The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) uniquely identifies the event. """ CLOUDEVENTS_EVENT_SOURCE = "cloudevents.event_source" """ The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) identifies the context in which an event happened. """ CLOUDEVENTS_EVENT_SPEC_VERSION = "cloudevents.event_spec_version" """ The [version of the CloudEvents specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. """ CLOUDEVENTS_EVENT_TYPE = "cloudevents.event_type" """ The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) contains a value describing the type of event related to the originating occurrence. 
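For instance, a consumer processing a CloudEvent might record these
attributes on its span (a sketch; the event values are placeholders and the
`opentelemetry-api` package is assumed):

    from opentelemetry import trace

    tracer = trace.get_tracer("cloudevents.example")
    with tracer.start_as_current_span("order.created receive") as span:
        span.set_attribute(
            SpanAttributes.CLOUDEVENTS_EVENT_ID, "A234-1234-1234"
        )
        span.set_attribute(
            SpanAttributes.CLOUDEVENTS_EVENT_TYPE, "com.example.order.created"
        )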
""" CLOUDEVENTS_EVENT_SUBJECT = "cloudevents.event_subject" """ The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) of the event in the context of the event producer (identified by source). """ OPENTRACING_REF_TYPE = "opentracing.ref_type" """ Parent-child Reference type. Note: The causal relationship between a child Span and a parent Span. """ DB_SYSTEM = "db.system" """ An identifier for the database management system (DBMS) product being used. See below for a list of well-known identifiers. """ DB_CONNECTION_STRING = "db.connection_string" """ The connection string used to connect to the database. It is recommended to remove embedded credentials. """ DB_USER = "db.user" """ Username for accessing the database. """ DB_JDBC_DRIVER_CLASSNAME = "db.jdbc.driver_classname" """ The fully-qualified class name of the [Java Database Connectivity (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver used to connect. """ DB_NAME = "db.name" """ This attribute is used to report the name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails). Note: In some SQL databases, the database name to be used is called "schema name". In case there are multiple layers that could be considered for database name (e.g. Oracle instance name and schema name), the database name to be used is the more specific layer (e.g. Oracle schema name). """ DB_STATEMENT = "db.statement" """ The database statement being executed. """ DB_OPERATION = "db.operation" """ The name of the operation being executed, e.g. the [MongoDB command name](https://docs.mongodb.com/manual/reference/command/#database-operations) such as `findAndModify`, or the SQL keyword. Note: When setting this to an SQL keyword, it is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if the operation name is provided by the library being instrumented. If the SQL statement has an ambiguous operation, or performs more than one operation, this value may be omitted. """ NETWORK_TRANSPORT = "network.transport" """ [OSI Transport Layer](https://osi-model.com/transport-layer/) or [Inter-process Communication method](https://en.wikipedia.org/wiki/Inter-process_communication). The value SHOULD be normalized to lowercase. """ NETWORK_TYPE = "network.type" """ [OSI Network Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The value SHOULD be normalized to lowercase. """ DB_MSSQL_INSTANCE_NAME = "db.mssql.instance_name" """ The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) connecting to. This name is used to determine the port of a named instance. Note: If setting a `db.mssql.instance_name`, `server.port` is no longer required (but still recommended if non-standard). """ DB_CASSANDRA_PAGE_SIZE = "db.cassandra.page_size" """ The fetch size used for paging, i.e. how many rows will be returned at once. """ DB_CASSANDRA_CONSISTENCY_LEVEL = "db.cassandra.consistency_level" """ The consistency level of the query. Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). """ DB_CASSANDRA_TABLE = "db.cassandra.table" """ The name of the primary table that the operation is acting upon, including the keyspace name (if applicable). 
Note: This mirrors the db.sql.table attribute but references cassandra rather than sql. It is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if it is provided by the library being instrumented. If the operation is acting upon an anonymous table, or more than one table, this value MUST NOT be set. """ DB_CASSANDRA_IDEMPOTENCE = "db.cassandra.idempotence" """ Whether or not the query is idempotent. """ DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT = ( "db.cassandra.speculative_execution_count" ) """ The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively. """ DB_CASSANDRA_COORDINATOR_ID = "db.cassandra.coordinator.id" """ The ID of the coordinating node for a query. """ DB_CASSANDRA_COORDINATOR_DC = "db.cassandra.coordinator.dc" """ The data center of the coordinating node for a query. """ DB_REDIS_DATABASE_INDEX = "db.redis.database_index" """ The index of the database being accessed as used in the [`SELECT` command](https://redis.io/commands/select), provided as an integer. To be used instead of the generic `db.name` attribute. """ DB_MONGODB_COLLECTION = "db.mongodb.collection" """ The collection being accessed within the database stated in `db.name`. """ URL_FULL = "url.full" """ Absolute URL describing a network resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986). Note: For network calls, URL usually has `scheme://host[:port][path][?query][#fragment]` format, where the fragment is not transmitted over HTTP, but if it is known, it should be included nevertheless. `url.full` MUST NOT contain credentials passed via URL in form of `https://username:password@www.example.com/`. In such case username and password should be redacted and attribute's value should be `https://REDACTED:REDACTED@www.example.com/`. `url.full` SHOULD capture the absolute URL when it is available (or can be reconstructed) and SHOULD NOT be validated or modified except for sanitizing purposes. """ DB_SQL_TABLE = "db.sql.table" """ The name of the primary table that the operation is acting upon, including the database name (if applicable). Note: It is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if it is provided by the library being instrumented. If the operation is acting upon an anonymous table, or more than one table, this value MUST NOT be set. """ DB_COSMOSDB_CLIENT_ID = "db.cosmosdb.client_id" """ Unique Cosmos client instance id. """ DB_COSMOSDB_OPERATION_TYPE = "db.cosmosdb.operation_type" """ CosmosDB Operation Type. """ USER_AGENT_ORIGINAL = "user_agent.original" """ Full user-agent string is generated by Cosmos DB SDK. Note: The user-agent value is generated by SDK which is a combination of
    `sdk_version` : Current version of SDK. e.g. 'cosmos-netstandard-sdk/3.23.0'
    `direct_pkg_version` : Direct package version used by Cosmos DB SDK. e.g. '3.23.1'
    `number_of_client_instances` : Number of cosmos client instances created by the application. e.g. '1'
    `type_of_machine_architecture` : Machine architecture. e.g. 'X64'
    `operating_system` : Operating System. e.g. 'Linux 5.4.0-1098-azure 104 18'
    `runtime_framework` : Runtime Framework. e.g. '.NET Core 3.1.32'
    `failover_information` : Generated key to determine if region failover enabled. Format Reg-{D (Disabled discovery)}-S(application region)|L(List of preferred regions)|N(None, user did not configure it). Default value is "NS". """ DB_COSMOSDB_CONNECTION_MODE = "db.cosmosdb.connection_mode" """ Cosmos client connection mode. """ DB_COSMOSDB_CONTAINER = "db.cosmosdb.container" """ Cosmos DB container name. """ DB_COSMOSDB_REQUEST_CONTENT_LENGTH = "db.cosmosdb.request_content_length" """ Request payload size in bytes. """ DB_COSMOSDB_STATUS_CODE = "db.cosmosdb.status_code" """ Cosmos DB status code. """ DB_COSMOSDB_SUB_STATUS_CODE = "db.cosmosdb.sub_status_code" """ Cosmos DB sub status code. """ DB_COSMOSDB_REQUEST_CHARGE = "db.cosmosdb.request_charge" """ RU consumed for that operation. """ OTEL_STATUS_CODE = "otel.status_code" """ Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is UNSET. """ OTEL_STATUS_DESCRIPTION = "otel.status_description" """ Description of the Status if it has a value, otherwise not set. """ FAAS_TRIGGER = "faas.trigger" """ Type of the trigger which caused this function invocation. Note: For the server/consumer span on the incoming side, `faas.trigger` MUST be set. Clients invoking FaaS instances usually cannot set `faas.trigger`, since they would typically need to look in the payload to determine the event type. If clients set it, it should be the same as the trigger that corresponding incoming would have (i.e., this has nothing to do with the underlying transport used to make the API call to invoke the lambda, which is often HTTP). """ FAAS_INVOCATION_ID = "faas.invocation_id" """ The invocation ID of the current function invocation. """ CLOUD_RESOURCE_ID = "cloud.resource_id" """ Cloud provider-specific native identifier of the monitored cloud resource (e.g. an [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) on AWS, a [fully qualified resource ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) on Azure, a [full resource name](https://cloud.google.com/apis/design/resource_names#full_resource_name) on GCP). Note: On some cloud providers, it may not be possible to determine the full ID at startup, so it may be necessary to set `cloud.resource_id` as a span attribute instead. The exact value to use for `cloud.resource_id` depends on the cloud provider. The following well-known definitions MUST be used if you set this attribute and they apply: * **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). Take care not to use the "invoked ARN" directly but replace any [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) with the resolved function version, as the same runtime instance may be invokable with multiple different aliases. * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) of the invoked function, *not* the function app, having the form `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share a TracerProvider. """ FAAS_DOCUMENT_COLLECTION = "faas.document.collection" """ The name of the source on which the triggering operation was performed. 
For example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database name. """ FAAS_DOCUMENT_OPERATION = "faas.document.operation" """ Describes the type of the operation that was performed on the data. """ FAAS_DOCUMENT_TIME = "faas.document.time" """ A string containing the time when the data was accessed in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). """ FAAS_DOCUMENT_NAME = "faas.document.name" """ The document name/table subjected to the operation. For example, in Cloud Storage or S3 is the name of the file, and in Cosmos DB the table name. """ URL_PATH = "url.path" """ The [URI path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component. Note: When missing, the value is assumed to be `/`. """ URL_QUERY = "url.query" """ The [URI query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component. Note: Sensitive content provided in query string SHOULD be scrubbed when instrumentations can identify it. """ MESSAGING_SYSTEM = "messaging.system" """ A string identifying the messaging system. """ MESSAGING_OPERATION = "messaging.operation" """ A string identifying the kind of messaging operation as defined in the [Operation names](#operation-names) section above. Note: If a custom value is used, it MUST be of low cardinality. """ MESSAGING_BATCH_MESSAGE_COUNT = "messaging.batch.message_count" """ The number of messages sent, received, or processed in the scope of the batching operation. Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on spans that operate with a single message. When a messaging client library supports both batch and single-message API for the same operation, instrumentations SHOULD use `messaging.batch.message_count` for batching APIs and SHOULD NOT use it for single-message APIs. """ MESSAGING_CLIENT_ID = "messaging.client_id" """ A unique identifier for the client that consumes or produces a message. """ MESSAGING_DESTINATION_NAME = "messaging.destination.name" """ The message destination name. Note: Destination name SHOULD uniquely identify a specific queue, topic or other entity within the broker. If the broker does not have such notion, the destination name SHOULD uniquely identify the broker. """ MESSAGING_DESTINATION_TEMPLATE = "messaging.destination.template" """ Low cardinality representation of the messaging destination name. Note: Destination names could be constructed from templates. An example would be a destination name involving a user name or product id. Although the destination name in this case is of high cardinality, the underlying template is of low cardinality and can be effectively used for grouping and aggregation. """ MESSAGING_DESTINATION_TEMPORARY = "messaging.destination.temporary" """ A boolean that is true if the message destination is temporary and might not exist anymore after messages are processed. """ MESSAGING_DESTINATION_ANONYMOUS = "messaging.destination.anonymous" """ A boolean that is true if the message destination is anonymous (could be unnamed or have auto-generated name). """ MESSAGING_MESSAGE_ID = "messaging.message.id" """ A value used by the messaging system as an identifier for the message, represented as a string. """ MESSAGING_MESSAGE_CONVERSATION_ID = "messaging.message.conversation_id" """ The [conversation ID](#conversations) identifying the conversation to which the message belongs, represented as a string. Sometimes called "Correlation ID". 
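A sketch of a producer span carrying these messaging attributes (the system,
destination, and conversation ID are placeholders; assumes the
`opentelemetry-api` package):

    from opentelemetry import trace

    tracer = trace.get_tracer("messaging.example")
    with tracer.start_as_current_span("orders publish") as span:
        span.set_attribute(SpanAttributes.MESSAGING_SYSTEM, "rabbitmq")
        span.set_attribute(
            SpanAttributes.MESSAGING_DESTINATION_NAME, "orders"
        )
        span.set_attribute(
            SpanAttributes.MESSAGING_MESSAGE_CONVERSATION_ID, "MyConversationId"
        )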
""" MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES = ( "messaging.message.payload_size_bytes" ) """ The (uncompressed) size of the message payload in bytes. Also use this attribute if it is unknown whether the compressed or uncompressed payload size is reported. """ MESSAGING_MESSAGE_PAYLOAD_COMPRESSED_SIZE_BYTES = ( "messaging.message.payload_compressed_size_bytes" ) """ The compressed size of the message payload in bytes. """ FAAS_TIME = "faas.time" """ A string containing the function invocation time in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). """ FAAS_CRON = "faas.cron" """ A string containing the schedule period as [Cron Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). """ FAAS_COLDSTART = "faas.coldstart" """ A boolean that is true if the serverless function is executed for the first time (aka cold-start). """ FAAS_INVOKED_NAME = "faas.invoked_name" """ The name of the invoked function. Note: SHOULD be equal to the `faas.name` resource attribute of the invoked function. """ FAAS_INVOKED_PROVIDER = "faas.invoked_provider" """ The cloud provider of the invoked function. Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked function. """ FAAS_INVOKED_REGION = "faas.invoked_region" """ The cloud region of the invoked function. Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked function. """ NETWORK_CONNECTION_TYPE = "network.connection.type" """ The internet connection type. """ NETWORK_CONNECTION_SUBTYPE = "network.connection.subtype" """ This describes more details regarding the connection.type. It may be the type of cell technology connection, but it could be used for describing details about a wifi connection. """ NETWORK_CARRIER_NAME = "network.carrier.name" """ The name of the mobile carrier. """ NETWORK_CARRIER_MCC = "network.carrier.mcc" """ The mobile carrier country code. """ NETWORK_CARRIER_MNC = "network.carrier.mnc" """ The mobile carrier network code. """ NETWORK_CARRIER_ICC = "network.carrier.icc" """ The ISO 3166-1 alpha-2 2-character country code associated with the mobile carrier network. """ PEER_SERVICE = "peer.service" """ The [`service.name`](/docs/resource/README.md#service) of the remote service. SHOULD be equal to the actual `service.name` resource attribute of the remote service if any. """ ENDUSER_ID = "enduser.id" """ Username or client_id extracted from the access token or [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the inbound request from outside the system. """ ENDUSER_ROLE = "enduser.role" """ Actual/assumed role the client is making the request under extracted from token or application security context. """ ENDUSER_SCOPE = "enduser.scope" """ Scopes or granted authorities the client currently possesses extracted from token or application security context. The value would come from the scope associated with an [OAuth 2.0 Access Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value in a [SAML 2.0 Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). """ THREAD_ID = "thread.id" """ Current "managed" thread ID (as opposed to OS thread ID). """ THREAD_NAME = "thread.name" """ Current thread name. """ CODE_FUNCTION = "code.function" """ The method or function name, or equivalent (usually rightmost part of the code unit's name). 
""" CODE_NAMESPACE = "code.namespace" """ The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit. """ CODE_FILEPATH = "code.filepath" """ The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path). """ CODE_LINENO = "code.lineno" """ The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. """ CODE_COLUMN = "code.column" """ The column number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. """ HTTP_REQUEST_METHOD_ORIGINAL = "http.request.method_original" """ Original HTTP method sent by the client in the request line. """ HTTP_REQUEST_BODY_SIZE = "http.request.body.size" """ The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. """ HTTP_RESPONSE_BODY_SIZE = "http.response.body.size" """ The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. """ HTTP_RESEND_COUNT = "http.resend_count" """ The ordinal number of request resending attempt (for any reason, including redirects). Note: The resend count SHOULD be updated each time an HTTP request gets resent by the client, regardless of what was the cause of the resending (e.g. redirection, authorization failure, 503 Server Unavailable, network issues, or any other). """ RPC_SYSTEM = "rpc.system" """ The value `aws-api`. """ RPC_SERVICE = "rpc.service" """ The name of the service to which a request is made, as returned by the AWS SDK. Note: This is the logical name of the service from the RPC interface perspective, which can be different from the name of any implementing class. The `code.namespace` attribute may be used to store the latter (despite the attribute name, it may include a class name; e.g., class with method actually executing the call on the server side, RPC client stub class on the client side). """ RPC_METHOD = "rpc.method" """ The name of the operation corresponding to the request, as returned by the AWS SDK. Note: This is the logical name of the method from the RPC interface perspective, which can be different from the name of any implementing method/function. The `code.function` attribute may be used to store the latter (e.g., method actually executing the call on the server side, RPC client stub method on the client side). """ AWS_REQUEST_ID = "aws.request_id" """ The AWS request ID as returned in the response headers `x-amz-request-id` or `x-amz-requestid`. """ AWS_DYNAMODB_TABLE_NAMES = "aws.dynamodb.table_names" """ The keys in the `RequestItems` object field. """ AWS_DYNAMODB_CONSUMED_CAPACITY = "aws.dynamodb.consumed_capacity" """ The JSON-serialized value of each item in the `ConsumedCapacity` response field. 
""" AWS_DYNAMODB_ITEM_COLLECTION_METRICS = ( "aws.dynamodb.item_collection_metrics" ) """ The JSON-serialized value of the `ItemCollectionMetrics` response field. """ AWS_DYNAMODB_PROVISIONED_READ_CAPACITY = ( "aws.dynamodb.provisioned_read_capacity" ) """ The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. """ AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY = ( "aws.dynamodb.provisioned_write_capacity" ) """ The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. """ AWS_DYNAMODB_CONSISTENT_READ = "aws.dynamodb.consistent_read" """ The value of the `ConsistentRead` request parameter. """ AWS_DYNAMODB_PROJECTION = "aws.dynamodb.projection" """ The value of the `ProjectionExpression` request parameter. """ AWS_DYNAMODB_LIMIT = "aws.dynamodb.limit" """ The value of the `Limit` request parameter. """ AWS_DYNAMODB_ATTRIBUTES_TO_GET = "aws.dynamodb.attributes_to_get" """ The value of the `AttributesToGet` request parameter. """ AWS_DYNAMODB_INDEX_NAME = "aws.dynamodb.index_name" """ The value of the `IndexName` request parameter. """ AWS_DYNAMODB_SELECT = "aws.dynamodb.select" """ The value of the `Select` request parameter. """ AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES = ( "aws.dynamodb.global_secondary_indexes" ) """ The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request field. """ AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES = ( "aws.dynamodb.local_secondary_indexes" ) """ The JSON-serialized value of each item of the `LocalSecondaryIndexes` request field. """ AWS_DYNAMODB_EXCLUSIVE_START_TABLE = "aws.dynamodb.exclusive_start_table" """ The value of the `ExclusiveStartTableName` request parameter. """ AWS_DYNAMODB_TABLE_COUNT = "aws.dynamodb.table_count" """ The the number of items in the `TableNames` response parameter. """ AWS_DYNAMODB_SCAN_FORWARD = "aws.dynamodb.scan_forward" """ The value of the `ScanIndexForward` request parameter. """ AWS_DYNAMODB_SEGMENT = "aws.dynamodb.segment" """ The value of the `Segment` request parameter. """ AWS_DYNAMODB_TOTAL_SEGMENTS = "aws.dynamodb.total_segments" """ The value of the `TotalSegments` request parameter. """ AWS_DYNAMODB_COUNT = "aws.dynamodb.count" """ The value of the `Count` response parameter. """ AWS_DYNAMODB_SCANNED_COUNT = "aws.dynamodb.scanned_count" """ The value of the `ScannedCount` response parameter. """ AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS = "aws.dynamodb.attribute_definitions" """ The JSON-serialized value of each item in the `AttributeDefinitions` request field. """ AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES = ( "aws.dynamodb.global_secondary_index_updates" ) """ The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` request field. """ AWS_S3_BUCKET = "aws.s3.bucket" """ The S3 bucket name the request refers to. Corresponds to the `--bucket` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations. Note: The `bucket` attribute is applicable to all S3 operations that reference a bucket, i.e. that require the bucket name as a mandatory parameter. This applies to almost all S3 operations except `list-buckets`. """ AWS_S3_KEY = "aws.s3.key" """ The S3 object key the request refers to. Corresponds to the `--key` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations. Note: The `key` attribute is applicable to all object-related S3 operations, i.e. that require the object key as a mandatory parameter. 
This applies in particular to the following operations: - [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html). """ AWS_S3_COPY_SOURCE = "aws.s3.copy_source" """ The source object (in the form `bucket`/`key`) for the copy operation. Note: The `copy_source` attribute applies to S3 copy operations and corresponds to the `--copy-source` parameter of the [copy-object operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). This applies in particular to the following operations: - [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html). """ AWS_S3_UPLOAD_ID = "aws.s3.upload_id" """ Upload ID that identifies the multipart upload. Note: The `upload_id` attribute applies to S3 multipart-upload operations and corresponds to the `--upload-id` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) multipart operations. This applies in particular to the following operations: - [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html). """ AWS_S3_DELETE = "aws.s3.delete" """ The delete request container that specifies the objects to be deleted. Note: The `delete` attribute is only applicable to the [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html) operation. The `delete` attribute corresponds to the `--delete` parameter of the [delete-objects operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). """ AWS_S3_PART_NUMBER = "aws.s3.part_number" """ The part number of the part being uploaded in a multipart-upload operation. This is a positive integer between 1 and 10,000.
Note: The `part_number` attribute is only applicable to the [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) and [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) operations. The `part_number` attribute corresponds to the `--part-number` parameter of the [upload-part operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). """ GRAPHQL_OPERATION_NAME = "graphql.operation.name" """ The name of the operation being executed. """ GRAPHQL_OPERATION_TYPE = "graphql.operation.type" """ The type of the operation being executed. """ GRAPHQL_DOCUMENT = "graphql.document" """ The GraphQL document being executed. Note: The value may be sanitized to exclude sensitive information. """ MESSAGING_RABBITMQ_DESTINATION_ROUTING_KEY = ( "messaging.rabbitmq.destination.routing_key" ) """ RabbitMQ message routing key. """ MESSAGING_KAFKA_MESSAGE_KEY = "messaging.kafka.message.key" """ Message keys in Kafka are used for grouping alike messages to ensure they're processed on the same partition. They differ from `messaging.message.id` in that they're not unique. If the key is `null`, the attribute MUST NOT be set. Note: If the key type is not string, its string representation has to be supplied for the attribute. If the key has no unambiguous, canonical string form, don't include its value. """ MESSAGING_KAFKA_CONSUMER_GROUP = "messaging.kafka.consumer.group" """ Name of the Kafka Consumer Group that is handling the message. Only applies to consumers, not producers. """ MESSAGING_KAFKA_DESTINATION_PARTITION = ( "messaging.kafka.destination.partition" ) """ Partition the message is sent to. """ MESSAGING_KAFKA_MESSAGE_OFFSET = "messaging.kafka.message.offset" """ The offset of a record in the corresponding Kafka partition. """ MESSAGING_KAFKA_MESSAGE_TOMBSTONE = "messaging.kafka.message.tombstone" """ A boolean that is true if the message is a tombstone. """ MESSAGING_ROCKETMQ_NAMESPACE = "messaging.rocketmq.namespace" """ Namespace of RocketMQ resources; resources in different namespaces are isolated from each other. """ MESSAGING_ROCKETMQ_CLIENT_GROUP = "messaging.rocketmq.client_group" """ Name of the RocketMQ producer/consumer group that is handling the message. The client type is identified by the SpanKind. """ MESSAGING_ROCKETMQ_MESSAGE_DELIVERY_TIMESTAMP = ( "messaging.rocketmq.message.delivery_timestamp" ) """ The timestamp in milliseconds that the delay message is expected to be delivered to the consumer. """ MESSAGING_ROCKETMQ_MESSAGE_DELAY_TIME_LEVEL = ( "messaging.rocketmq.message.delay_time_level" ) """ The delay time level for a delay message, which determines the message delay time. """ MESSAGING_ROCKETMQ_MESSAGE_GROUP = "messaging.rocketmq.message.group" """ Required for FIFO messages. Messages that belong to the same message group are always processed one by one within the same consumer group. """ MESSAGING_ROCKETMQ_MESSAGE_TYPE = "messaging.rocketmq.message.type" """ Type of message. """ MESSAGING_ROCKETMQ_MESSAGE_TAG = "messaging.rocketmq.message.tag" """ The secondary classifier of the message besides the topic. """ MESSAGING_ROCKETMQ_MESSAGE_KEYS = "messaging.rocketmq.message.keys" """ Key(s) of the message, another way to mark the message besides the message id. """ MESSAGING_ROCKETMQ_CONSUMPTION_MODEL = ( "messaging.rocketmq.consumption_model" ) """ Model of message consumption. This only applies to consumer spans.
""" RPC_GRPC_STATUS_CODE = "rpc.grpc.status_code" """ The [numeric status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC request. """ RPC_JSONRPC_VERSION = "rpc.jsonrpc.version" """ Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 does not specify this, the value can be omitted. """ RPC_JSONRPC_REQUEST_ID = "rpc.jsonrpc.request_id" """ `id` property of request or response. Since protocol allows id to be int, string, `null` or missing (for notifications), value is expected to be cast to string for simplicity. Use empty string in case of `null` value. Omit entirely if this is a notification. """ RPC_JSONRPC_ERROR_CODE = "rpc.jsonrpc.error_code" """ `error.code` property of response if it is an error response. """ RPC_JSONRPC_ERROR_MESSAGE = "rpc.jsonrpc.error_message" """ `error.message` property of response if it is an error response. """ MESSAGE_TYPE = "message.type" """ Whether this is a received or sent message. """ MESSAGE_ID = "message.id" """ MUST be calculated as two different counters starting from `1` one for sent messages and one for received message. Note: This way we guarantee that the values will be consistent between different implementations. """ MESSAGE_COMPRESSED_SIZE = "message.compressed_size" """ Compressed size of the message in bytes. """ MESSAGE_UNCOMPRESSED_SIZE = "message.uncompressed_size" """ Uncompressed size of the message in bytes. """ RPC_CONNECT_RPC_ERROR_CODE = "rpc.connect_rpc.error_code" """ The [error codes](https://connect.build/docs/protocol/#error-codes) of the Connect request. Error codes are always string values. """ EXCEPTION_ESCAPED = "exception.escaped" """ SHOULD be set to true if the exception event is recorded at a point where it is known that the exception is escaping the scope of the span. Note: An exception is considered to have escaped (or left) the scope of a span, if that span is ended while the exception is still logically "in flight". This may be actually "in flight" in some languages (e.g. if the exception is passed to a Context manager's `__exit__` method in Python) but will usually be caught at the point of recording the exception in most languages. It is usually not possible to determine at the point where an exception is thrown whether it will escape the scope of a span. However, it is trivial to know that an exception will escape, if one checks for an active exception just before ending the span, as done in the [example above](#recording-an-exception). It follows that an exception may still escape the scope of the span even if the `exception.escaped` attribute was not set or set to false, since the event might have been recorded at a time where it was not clear whether the exception will escape. """ URL_FRAGMENT = "url.fragment" """ The [URI fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component. """ # Manually defined deprecated attributes NET_PEER_IP = "net.peer.ip" """ Deprecated, use the `client.socket.address` attribute. """ NET_HOST_IP = "net.host.ip" """ Deprecated, use the `server.socket.address` attribute. """ HTTP_SERVER_NAME = "http.server_name" """ Deprecated, use the `server.address` attribute. """ HTTP_HOST = "http.host" """ Deprecated, use the `server.address` and `server.port` attributes. """ HTTP_RETRY_COUNT = "http.retry_count" """ Deprecated, use the `http.resend_count` attribute. 
""" HTTP_REQUEST_CONTENT_LENGTH_UNCOMPRESSED = ( "http.request_content_length_uncompressed" ) """ Deprecated, use the `http.request.body.size` attribute. """ HTTP_RESPONSE_CONTENT_LENGTH_UNCOMPRESSED = ( "http.response_content_length_uncompressed" ) """ Deprecated, use the `http.response.body.size` attribute. """ MESSAGING_DESTINATION = "messaging.destination" """ Deprecated, use the `messaging.destination.name` attribute. """ MESSAGING_DESTINATION_KIND = "messaging.destination_kind" """ Deprecated. """ MESSAGING_TEMP_DESTINATION = "messaging.temp_destination" """ Deprecated. Use `messaging.destination.temporary` attribute. """ MESSAGING_PROTOCOL = "messaging.protocol" """ Deprecated. Use `network.protocol.name` attribute. """ MESSAGING_PROTOCOL_VERSION = "messaging.protocol_version" """ Deprecated. Use `network.protocol.version` attribute. """ MESSAGING_URL = "messaging.url" """ Deprecated. Use `server.address` and `server.port` attributes. """ MESSAGING_CONVERSATION_ID = "messaging.conversation_id" """ Deprecated. Use `messaging.message.conversation.id` attribute. """ MESSAGING_KAFKA_PARTITION = "messaging.kafka.partition" """ Deprecated. Use `messaging.kafka.destination.partition` attribute. """ FAAS_EXECUTION = "faas.execution" """ Deprecated. Use `faas.invocation_id` attribute. """ HTTP_USER_AGENT = "http.user_agent" """ Deprecated. Use `user_agent.original` attribute. """ MESSAGING_RABBITMQ_ROUTING_KEY = "messaging.rabbitmq.routing_key" """ Deprecated. Use `messaging.rabbitmq.destination.routing_key` attribute. """ MESSAGING_KAFKA_TOMBSTONE = "messaging.kafka.tombstone" """ Deprecated. Use `messaging.kafka.destination.tombstone` attribute. """ NET_APP_PROTOCOL_NAME = "net.app.protocol.name" """ Deprecated. Use `network.protocol.name` attribute. """ NET_APP_PROTOCOL_VERSION = "net.app.protocol.version" """ Deprecated. Use `network.protocol.version` attribute. """ HTTP_CLIENT_IP = "http.client_ip" """ Deprecated. Use `client.address` attribute. """ HTTP_FLAVOR = "http.flavor" """ Deprecated. Use `network.protocol.name` and `network.protocol.version` attributes. """ NET_HOST_CONNECTION_TYPE = "net.host.connection.type" """ Deprecated. Use `network.connection.type` attribute. """ NET_HOST_CONNECTION_SUBTYPE = "net.host.connection.subtype" """ Deprecated. Use `network.connection.subtype` attribute. """ NET_HOST_CARRIER_NAME = "net.host.carrier.name" """ Deprecated. Use `network.carrier.name` attribute. """ NET_HOST_CARRIER_MCC = "net.host.carrier.mcc" """ Deprecated. Use `network.carrier.mcc` attribute. """ NET_HOST_CARRIER_MNC = "net.host.carrier.mnc" """ Deprecated. Use `network.carrier.mnc` attribute. """ MESSAGING_CONSUMER_ID = "messaging.consumer_id" """ Deprecated. Use `messaging.client_id` attribute. """ MESSAGING_KAFKA_CLIENT_ID = "messaging.kafka.client_id" """ Deprecated. Use `messaging.client_id` attribute. """ MESSAGING_ROCKETMQ_CLIENT_ID = "messaging.rocketmq.client_id" """ Deprecated. Use `messaging.client_id` attribute. """ @deprecated( "Removed from the specification in favor of `network.protocol.name` and `network.protocol.version` attributes. Deprecated since version 1.18.0.", ) class HttpFlavorValues(Enum): HTTP_1_0 = "1.0" HTTP_1_1 = "1.1" HTTP_2_0 = "2.0" HTTP_3_0 = "3.0" SPDY = "SPDY" QUIC = "QUIC" @deprecated( "Removed from the specification. 
Deprecated since version 1.18.0.", ) class MessagingDestinationKindValues(Enum): QUEUE = "queue" """A message sent to a queue.""" TOPIC = "topic" """A message sent to a topic.""" @deprecated( "Renamed to NetworkConnectionTypeValues. Deprecated since version 1.21.0.", ) class NetHostConnectionTypeValues(Enum): WIFI = "wifi" """wifi.""" WIRED = "wired" """wired.""" CELL = "cell" """cell.""" UNAVAILABLE = "unavailable" """unavailable.""" UNKNOWN = "unknown" """unknown.""" @deprecated( "Renamed to NetworkConnectionSubtypeValues. Deprecated since version 1.21.0.", ) class NetHostConnectionSubtypeValues(Enum): GPRS = "gprs" """GPRS.""" EDGE = "edge" """EDGE.""" UMTS = "umts" """UMTS.""" CDMA = "cdma" """CDMA.""" EVDO_0 = "evdo_0" """EVDO Rel. 0.""" EVDO_A = "evdo_a" """EVDO Rev. A.""" CDMA2000_1XRTT = "cdma2000_1xrtt" """CDMA2000 1XRTT.""" HSDPA = "hsdpa" """HSDPA.""" HSUPA = "hsupa" """HSUPA.""" HSPA = "hspa" """HSPA.""" IDEN = "iden" """IDEN.""" EVDO_B = "evdo_b" """EVDO Rev. B.""" LTE = "lte" """LTE.""" EHRPD = "ehrpd" """EHRPD.""" HSPAP = "hspap" """HSPAP.""" GSM = "gsm" """GSM.""" TD_SCDMA = "td_scdma" """TD-SCDMA.""" IWLAN = "iwlan" """IWLAN.""" NR = "nr" """5G NR (New Radio).""" NRNSA = "nrnsa" """5G NRNSA (New Radio Non-Standalone).""" LTE_CA = "lte_ca" """LTE CA.""" @deprecated( "Use :py:const:`opentelemetry.semconv.attributes.NetworkTransportValues` instead. Deprecated since version 1.25.0.", ) class NetTransportValues(Enum): IP_TCP = "ip_tcp" """ip_tcp.""" IP_UDP = "ip_udp" """ip_udp.""" PIPE = "pipe" """Named or anonymous pipe.""" INPROC = "inproc" """In-process communication.""" OTHER = "other" """Something else (non IP-based).""" @deprecated( "Use :py:const:`opentelemetry.semconv.attributes.NetworkType` instead. Deprecated since version 1.25.0.", ) class NetSockFamilyValues(Enum): INET = "inet" """IPv4 address.""" INET6 = "inet6" """IPv6 address.""" UNIX = "unix" """Unix domain socket path.""" @deprecated( "Use :py:const:`opentelemetry.semconv.attributes.HttpRequestMethodValues` instead. Deprecated since version 1.25.0.", ) class HttpRequestMethodValues(Enum): CONNECT = "CONNECT" """CONNECT method.""" DELETE = "DELETE" """DELETE method.""" GET = "GET" """GET method.""" HEAD = "HEAD" """HEAD method.""" OPTIONS = "OPTIONS" """OPTIONS method.""" PATCH = "PATCH" """PATCH method.""" POST = "POST" """POST method.""" PUT = "PUT" """PUT method.""" TRACE = "TRACE" """TRACE method.""" OTHER = "_OTHER" """Any HTTP method that the instrumentation has no prior knowledge of.""" @deprecated("Removed from the specification. Deprecated since version 1.25.0.") class EventDomainValues(Enum): BROWSER = "browser" """Events from browser apps.""" DEVICE = "device" """Events from mobile apps.""" K8S = "k8s" """Events from Kubernetes.""" @deprecated( "Use :py:const:`opentelemetry.semconv._incubating.attributes.LogIostreamValues` instead. Deprecated since version 1.25.0.", ) class LogIostreamValues(Enum): STDOUT = "stdout" """Logs from stdout stream.""" STDERR = "stderr" """Events from stderr stream.""" @deprecated("Removed from the specification. Deprecated since version 1.25.0.") class TypeValues(Enum): HEAP = "heap" """Heap memory.""" NON_HEAP = "non_heap" """Non-heap memory.""" @deprecated( "Use :py:const:`opentelemetry.semconv._incubating.attributes.OpentracingRefTypeValues` instead. 
Deprecated since version 1.25.0.", ) class OpentracingRefTypeValues(Enum): CHILD_OF = "child_of" """The parent Span depends on the child Span in some capacity.""" FOLLOWS_FROM = "follows_from" """The parent Span does not depend in any way on the result of the child Span.""" class DbSystemValues(Enum): OTHER_SQL = "other_sql" """Some other SQL database. Fallback only. See notes.""" MSSQL = "mssql" """Microsoft SQL Server.""" MSSQLCOMPACT = "mssqlcompact" """Microsoft SQL Server Compact.""" MYSQL = "mysql" """MySQL.""" ORACLE = "oracle" """Oracle Database.""" DB2 = "db2" """IBM Db2.""" POSTGRESQL = "postgresql" """PostgreSQL.""" REDSHIFT = "redshift" """Amazon Redshift.""" HIVE = "hive" """Apache Hive.""" CLOUDSCAPE = "cloudscape" """Cloudscape.""" HSQLDB = "hsqldb" """HyperSQL DataBase.""" PROGRESS = "progress" """Progress Database.""" MAXDB = "maxdb" """SAP MaxDB.""" HANADB = "hanadb" """SAP HANA.""" INGRES = "ingres" """Ingres.""" FIRSTSQL = "firstsql" """FirstSQL.""" EDB = "edb" """EnterpriseDB.""" CACHE = "cache" """InterSystems Caché.""" ADABAS = "adabas" """Adabas (Adaptable Database System).""" FIREBIRD = "firebird" """Firebird.""" DERBY = "derby" """Apache Derby.""" FILEMAKER = "filemaker" """FileMaker.""" INFORMIX = "informix" """Informix.""" INSTANTDB = "instantdb" """InstantDB.""" INTERBASE = "interbase" """InterBase.""" MARIADB = "mariadb" """MariaDB.""" NETEZZA = "netezza" """Netezza.""" PERVASIVE = "pervasive" """Pervasive PSQL.""" POINTBASE = "pointbase" """PointBase.""" SQLITE = "sqlite" """SQLite.""" SYBASE = "sybase" """Sybase.""" TERADATA = "teradata" """Teradata.""" VERTICA = "vertica" """Vertica.""" H2 = "h2" """H2.""" COLDFUSION = "coldfusion" """ColdFusion IMQ.""" CASSANDRA = "cassandra" """Apache Cassandra.""" HBASE = "hbase" """Apache HBase.""" MONGODB = "mongodb" """MongoDB.""" REDIS = "redis" """Redis.""" COUCHBASE = "couchbase" """Couchbase.""" COUCHDB = "couchdb" """CouchDB.""" COSMOSDB = "cosmosdb" """Microsoft Azure Cosmos DB.""" DYNAMODB = "dynamodb" """Amazon DynamoDB.""" NEO4J = "neo4j" """Neo4j.""" GEODE = "geode" """Apache Geode.""" ELASTICSEARCH = "elasticsearch" """Elasticsearch.""" MEMCACHED = "memcached" """Memcached.""" COCKROACHDB = "cockroachdb" """CockroachDB.""" OPENSEARCH = "opensearch" """OpenSearch.""" CLICKHOUSE = "clickhouse" """ClickHouse.""" SPANNER = "spanner" """Cloud Spanner.""" TRINO = "trino" """Trino.""" class NetworkTransportValues(Enum): TCP = "tcp" """TCP.""" UDP = "udp" """UDP.""" PIPE = "pipe" """Named or anonymous pipe. 
See note below.""" UNIX = "unix" """Unix domain socket.""" class NetworkTypeValues(Enum): IPV4 = "ipv4" """IPv4.""" IPV6 = "ipv6" """IPv6.""" class DbCassandraConsistencyLevelValues(Enum): ALL = "all" """all.""" EACH_QUORUM = "each_quorum" """each_quorum.""" QUORUM = "quorum" """quorum.""" LOCAL_QUORUM = "local_quorum" """local_quorum.""" ONE = "one" """one.""" TWO = "two" """two.""" THREE = "three" """three.""" LOCAL_ONE = "local_one" """local_one.""" ANY = "any" """any.""" SERIAL = "serial" """serial.""" LOCAL_SERIAL = "local_serial" """local_serial.""" class DbCosmosdbOperationTypeValues(Enum): INVALID = "Invalid" """invalid.""" CREATE = "Create" """create.""" PATCH = "Patch" """patch.""" READ = "Read" """read.""" READ_FEED = "ReadFeed" """read_feed.""" DELETE = "Delete" """delete.""" REPLACE = "Replace" """replace.""" EXECUTE = "Execute" """execute.""" QUERY = "Query" """query.""" HEAD = "Head" """head.""" HEAD_FEED = "HeadFeed" """head_feed.""" UPSERT = "Upsert" """upsert.""" BATCH = "Batch" """batch.""" QUERY_PLAN = "QueryPlan" """query_plan.""" EXECUTE_JAVASCRIPT = "ExecuteJavaScript" """execute_javascript.""" class DbCosmosdbConnectionModeValues(Enum): GATEWAY = "gateway" """Gateway (HTTP) connections mode.""" DIRECT = "direct" """Direct connection.""" class OtelStatusCodeValues(Enum): OK = "OK" """The operation has been validated by an Application developer or Operator to have completed successfully.""" ERROR = "ERROR" """The operation contains an error.""" class FaasTriggerValues(Enum): DATASOURCE = "datasource" """A response to some data source operation such as a database or filesystem read/write.""" HTTP = "http" """To provide an answer to an inbound HTTP request.""" PUBSUB = "pubsub" """A function is set to be executed when messages are sent to a messaging system.""" TIMER = "timer" """A function is scheduled to be executed regularly.""" OTHER = "other" """If none of the others apply.""" class FaasDocumentOperationValues(Enum): INSERT = "insert" """When a new object is created.""" EDIT = "edit" """When an object is modified.""" DELETE = "delete" """When an object is deleted.""" class MessagingOperationValues(Enum): PUBLISH = "publish" """publish.""" RECEIVE = "receive" """receive.""" PROCESS = "process" """process.""" class FaasInvokedProviderValues(Enum): ALIBABA_CLOUD = "alibaba_cloud" """Alibaba Cloud.""" AWS = "aws" """Amazon Web Services.""" AZURE = "azure" """Microsoft Azure.""" GCP = "gcp" """Google Cloud Platform.""" TENCENT_CLOUD = "tencent_cloud" """Tencent Cloud.""" class NetworkConnectionTypeValues(Enum): WIFI = "wifi" """wifi.""" WIRED = "wired" """wired.""" CELL = "cell" """cell.""" UNAVAILABLE = "unavailable" """unavailable.""" UNKNOWN = "unknown" """unknown.""" class NetworkConnectionSubtypeValues(Enum): GPRS = "gprs" """GPRS.""" EDGE = "edge" """EDGE.""" UMTS = "umts" """UMTS.""" CDMA = "cdma" """CDMA.""" EVDO_0 = "evdo_0" """EVDO Rel. 0.""" EVDO_A = "evdo_a" """EVDO Rev. A.""" CDMA2000_1XRTT = "cdma2000_1xrtt" """CDMA2000 1XRTT.""" HSDPA = "hsdpa" """HSDPA.""" HSUPA = "hsupa" """HSUPA.""" HSPA = "hspa" """HSPA.""" IDEN = "iden" """IDEN.""" EVDO_B = "evdo_b" """EVDO Rev. 
B.""" LTE = "lte" """LTE.""" EHRPD = "ehrpd" """EHRPD.""" HSPAP = "hspap" """HSPAP.""" GSM = "gsm" """GSM.""" TD_SCDMA = "td_scdma" """TD-SCDMA.""" IWLAN = "iwlan" """IWLAN.""" NR = "nr" """5G NR (New Radio).""" NRNSA = "nrnsa" """5G NRNSA (New Radio Non-Standalone).""" LTE_CA = "lte_ca" """LTE CA.""" class RpcSystemValues(Enum): GRPC = "grpc" """gRPC.""" JAVA_RMI = "java_rmi" """Java RMI.""" DOTNET_WCF = "dotnet_wcf" """.NET WCF.""" APACHE_DUBBO = "apache_dubbo" """Apache Dubbo.""" CONNECT_RPC = "connect_rpc" """Connect RPC.""" class GraphqlOperationTypeValues(Enum): QUERY = "query" """GraphQL query.""" MUTATION = "mutation" """GraphQL mutation.""" SUBSCRIPTION = "subscription" """GraphQL subscription.""" class MessagingRocketmqMessageTypeValues(Enum): NORMAL = "normal" """Normal message.""" FIFO = "fifo" """FIFO message.""" DELAY = "delay" """Delay message.""" TRANSACTION = "transaction" """Transaction message.""" class MessagingRocketmqConsumptionModelValues(Enum): CLUSTERING = "clustering" """Clustering consumption model.""" BROADCASTING = "broadcasting" """Broadcasting consumption model.""" class RpcGrpcStatusCodeValues(Enum): OK = 0 """OK.""" CANCELLED = 1 """CANCELLED.""" UNKNOWN = 2 """UNKNOWN.""" INVALID_ARGUMENT = 3 """INVALID_ARGUMENT.""" DEADLINE_EXCEEDED = 4 """DEADLINE_EXCEEDED.""" NOT_FOUND = 5 """NOT_FOUND.""" ALREADY_EXISTS = 6 """ALREADY_EXISTS.""" PERMISSION_DENIED = 7 """PERMISSION_DENIED.""" RESOURCE_EXHAUSTED = 8 """RESOURCE_EXHAUSTED.""" FAILED_PRECONDITION = 9 """FAILED_PRECONDITION.""" ABORTED = 10 """ABORTED.""" OUT_OF_RANGE = 11 """OUT_OF_RANGE.""" UNIMPLEMENTED = 12 """UNIMPLEMENTED.""" INTERNAL = 13 """INTERNAL.""" UNAVAILABLE = 14 """UNAVAILABLE.""" DATA_LOSS = 15 """DATA_LOSS.""" UNAUTHENTICATED = 16 """UNAUTHENTICATED.""" class MessageTypeValues(Enum): SENT = "SENT" """sent.""" RECEIVED = "RECEIVED" """received.""" class RpcConnectRpcErrorCodeValues(Enum): CANCELLED = "cancelled" """cancelled.""" UNKNOWN = "unknown" """unknown.""" INVALID_ARGUMENT = "invalid_argument" """invalid_argument.""" DEADLINE_EXCEEDED = "deadline_exceeded" """deadline_exceeded.""" NOT_FOUND = "not_found" """not_found.""" ALREADY_EXISTS = "already_exists" """already_exists.""" PERMISSION_DENIED = "permission_denied" """permission_denied.""" RESOURCE_EXHAUSTED = "resource_exhausted" """resource_exhausted.""" FAILED_PRECONDITION = "failed_precondition" """failed_precondition.""" ABORTED = "aborted" """aborted.""" OUT_OF_RANGE = "out_of_range" """out_of_range.""" UNIMPLEMENTED = "unimplemented" """unimplemented.""" INTERNAL = "internal" """internal.""" UNAVAILABLE = "unavailable" """unavailable.""" DATA_LOSS = "data_loss" """data_loss.""" UNAUTHENTICATED = "unauthenticated" """unauthenticated.""" python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/version/000077500000000000000000000000001511654350100337035ustar00rootroot00000000000000__init__.py000066400000000000000000000011401511654350100357310ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/src/opentelemetry/semconv/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.60b1" python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/test-requirements.txt000066400000000000000000000003661511654350100313270ustar00rootroot00000000000000asgiref==3.7.2 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e opentelemetry-semantic-conventions python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/tests/000077500000000000000000000000001511654350100262235ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/tests/__init__.py000066400000000000000000000000001511654350100303220ustar00rootroot00000000000000python-opentelemetry-1.39.1/opentelemetry-semantic-conventions/tests/test_semconv.py000066400000000000000000000015601511654350100313100ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore from importlib.util import find_spec from unittest import TestCase class TestSemanticConventions(TestCase): def test_semantic_conventions(self): if find_spec("opentelemetry.semconv") is None: self.fail("opentelemetry-semantic-conventions not installed") python-opentelemetry-1.39.1/propagator/000077500000000000000000000000001511654350100201775ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/000077500000000000000000000000001511654350100255515ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/LICENSE000066400000000000000000000261351511654350100265650ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/README.rst000066400000000000000000000007271511654350100272460ustar00rootroot00000000000000OpenTelemetry B3 Propagator =========================== |pypi| .. 
|pypi| image:: https://badge.fury.io/py/opentelemetry-propagator-b3.svg :target: https://pypi.org/project/opentelemetry-propagator-b3/ This library provides a propagator for the B3 format. Installation ------------ :: pip install opentelemetry-propagator-b3 References ---------- * `OpenTelemetry <https://opentelemetry.io/>`_ * `B3 format <https://github.com/openzipkin/b3-propagation>`_ python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/benchmark-requirements.txt000066400000000000000000000000301511654350100327600ustar00rootroot00000000000000pytest-benchmark==4.0.0 python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/benchmarks/000077500000000000000000000000001511654350100276665ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/benchmarks/trace/000077500000000000000000000000001511654350100307645ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/benchmarks/trace/propagation/000077500000000000000000000000001511654350100333075ustar00rootroot00000000000000test_benchmark_b3_format.py000066400000000000000000000026371511654350100405370ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/benchmarks/trace/propagation# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
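# Illustrative sketch (an editorial addition, not part of this benchmark):
# how an application would typically opt in to B3 propagation globally.
# It assumes only `opentelemetry.propagate.set_global_textmap` from the
# opentelemetry-api package and the B3SingleFormat class defined in this repo:
#
#     from opentelemetry.propagate import set_global_textmap
#     from opentelemetry.propagators.b3 import B3SingleFormat
#
#     # After this call, inject()/extract() via the global propagator
#     # read and write the single "b3" header instead of W3C traceparent.
#     set_global_textmap(B3SingleFormat())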
import opentelemetry.propagators.b3 as b3_format from opentelemetry.sdk.trace import TracerProvider FORMAT = b3_format.B3Format() def test_extract_single_header(benchmark): benchmark( FORMAT.extract, { FORMAT.SINGLE_HEADER_KEY: "bdb5b63237ed38aea578af665aa5aa60-c32d953d73ad2251-1" }, ) def test_inject_empty_context(benchmark): tracer = TracerProvider().get_tracer("sdk_tracer_provider") with tracer.start_as_current_span("Root Span"): with tracer.start_as_current_span("Child Span"): benchmark( FORMAT.inject, { FORMAT.TRACE_ID_KEY: "bdb5b63237ed38aea578af665aa5aa60", FORMAT.SPAN_ID_KEY: "00000000000000000c32d953d73ad225", FORMAT.SAMPLED_KEY: "1", }, ) python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/py.typed000066400000000000000000000000001511654350100272360ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/pyproject.toml000066400000000000000000000027111511654350100304660ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-propagator-b3" dynamic = ["version"] description = "OpenTelemetry B3 Propagator" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "typing-extensions >= 4.5.0", "opentelemetry-api ~= 1.3", ] [project.entry-points.opentelemetry_propagator] b3 = "opentelemetry.propagators.b3:B3SingleFormat" b3multi = "opentelemetry.propagators.b3:B3MultiFormat" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/propagator/opentelemetry-propagator-b3" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/propagators/b3/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/src/000077500000000000000000000000001511654350100263405ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/src/opentelemetry/000077500000000000000000000000001511654350100312345ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/000077500000000000000000000000001511654350100335755ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/000077500000000000000000000000001511654350100341015ustar00rootroot00000000000000__init__.py000066400000000000000000000150641511654350100361410ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import typing from re import compile as re_compile from typing_extensions import deprecated from opentelemetry import trace from opentelemetry.context import Context from opentelemetry.propagators.textmap import ( CarrierT, Getter, Setter, TextMapPropagator, default_getter, default_setter, ) from opentelemetry.trace import format_span_id, format_trace_id class B3MultiFormat(TextMapPropagator): """Propagator for the B3 HTTP multi-header format. See: https://github.com/openzipkin/b3-propagation https://github.com/openzipkin/b3-propagation#multiple-headers """ SINGLE_HEADER_KEY = "b3" TRACE_ID_KEY = "x-b3-traceid" SPAN_ID_KEY = "x-b3-spanid" SAMPLED_KEY = "x-b3-sampled" FLAGS_KEY = "x-b3-flags" _SAMPLE_PROPAGATE_VALUES = {"1", "True", "true", "d"} _trace_id_regex = re_compile(r"[\da-fA-F]{16}|[\da-fA-F]{32}") _span_id_regex = re_compile(r"[\da-fA-F]{16}") def extract( self, carrier: CarrierT, context: typing.Optional[Context] = None, getter: Getter = default_getter, ) -> Context: if context is None: context = Context() trace_id = trace.INVALID_TRACE_ID span_id = trace.INVALID_SPAN_ID sampled = "0" flags = None single_header = _extract_first_element( getter.get(carrier, self.SINGLE_HEADER_KEY) ) if single_header: # The b3 spec calls for the sampling state to be # "deferred", which is unspecified. This concept does not # translate to SpanContext, so we set it as recorded. sampled = "1" fields = single_header.split("-", 4) if len(fields) == 1: sampled = fields[0] elif len(fields) == 2: trace_id, span_id = fields elif len(fields) == 3: trace_id, span_id, sampled = fields elif len(fields) == 4: trace_id, span_id, sampled, _ = fields else: trace_id = ( _extract_first_element(getter.get(carrier, self.TRACE_ID_KEY)) or trace_id ) span_id = ( _extract_first_element(getter.get(carrier, self.SPAN_ID_KEY)) or span_id ) sampled = ( _extract_first_element(getter.get(carrier, self.SAMPLED_KEY)) or sampled ) flags = ( _extract_first_element(getter.get(carrier, self.FLAGS_KEY)) or flags ) if ( trace_id == trace.INVALID_TRACE_ID or span_id == trace.INVALID_SPAN_ID or self._trace_id_regex.fullmatch(trace_id) is None or self._span_id_regex.fullmatch(span_id) is None ): return context trace_id = int(trace_id, 16) span_id = int(span_id, 16) options = 0 # The b3 spec provides no defined behavior for both sample and # flag values set. Since the setting of at least one implies # the desire for some form of sampling, propagate if either # header is set to allow. 
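        # Worked examples of this rule (illustrative, derived from the
        # code directly below, not quoted from the b3 spec):
        #   sampled="1", flags unset  -> SAMPLED flag set
        #   sampled="d" (debug)       -> SAMPLED flag set
        #   sampled="0", flags="1"    -> SAMPLED flag set (flags take precedence)
        #   sampled="0", flags unset  -> SAMPLED flag not set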
if sampled in self._SAMPLE_PROPAGATE_VALUES or flags == "1": options |= trace.TraceFlags.SAMPLED return trace.set_span_in_context( trace.NonRecordingSpan( trace.SpanContext( # trace and span ids are encoded in hex, so must be converted trace_id=trace_id, span_id=span_id, is_remote=True, trace_flags=trace.TraceFlags(options), trace_state=trace.TraceState(), ) ), context, ) def inject( self, carrier: CarrierT, context: typing.Optional[Context] = None, setter: Setter = default_setter, ) -> None: span = trace.get_current_span(context=context) span_context = span.get_span_context() if span_context == trace.INVALID_SPAN_CONTEXT: return sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0 setter.set( carrier, self.TRACE_ID_KEY, format_trace_id(span_context.trace_id), ) setter.set( carrier, self.SPAN_ID_KEY, format_span_id(span_context.span_id) ) setter.set(carrier, self.SAMPLED_KEY, "1" if sampled else "0") @property def fields(self) -> typing.Set[str]: return { self.TRACE_ID_KEY, self.SPAN_ID_KEY, self.SAMPLED_KEY, } class B3SingleFormat(B3MultiFormat): """Propagator for the B3 HTTP single-header format. See: https://github.com/openzipkin/b3-propagation https://github.com/openzipkin/b3-propagation#single-header """ def inject( self, carrier: CarrierT, context: typing.Optional[Context] = None, setter: Setter = default_setter, ) -> None: span = trace.get_current_span(context=context) span_context = span.get_span_context() if span_context == trace.INVALID_SPAN_CONTEXT: return sampled = (trace.TraceFlags.SAMPLED & span_context.trace_flags) != 0 fields = [ format_trace_id(span_context.trace_id), format_span_id(span_context.span_id), "1" if sampled else "0", ] setter.set(carrier, self.SINGLE_HEADER_KEY, "-".join(fields)) @property def fields(self) -> typing.Set[str]: return {self.SINGLE_HEADER_KEY} class B3Format(B3MultiFormat): @deprecated( "B3Format is deprecated in favor of B3MultiFormat. Deprecated since version 1.2.0.", ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def _extract_first_element( items: typing.Iterable[CarrierT], ) -> typing.Optional[CarrierT]: if items is None: return None return next(iter(items), None) py.typed000066400000000000000000000000001511654350100355070ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3version/000077500000000000000000000000001511654350100355075ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3__init__.py000066400000000000000000000011401511654350100376140ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/src/opentelemetry/propagators/b3/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
__version__ = "1.39.1" python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/test-requirements.txt000066400000000000000000000004651511654350100320170ustar00rootroot00000000000000asgiref==3.7.2 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e opentelemetry-sdk -e opentelemetry-semantic-conventions -e propagator/opentelemetry-propagator-b3 python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/tests/000077500000000000000000000000001511654350100267135ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/tests/__init__.py000066400000000000000000000011101511654350100310150ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-b3/tests/test_b3_format.py000066400000000000000000000403601511654350100322030ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import unittest from abc import abstractmethod from unittest.mock import Mock import opentelemetry.trace as trace_api from opentelemetry.context import Context, get_current from opentelemetry.propagators.b3 import ( # pylint: disable=no-name-in-module,import-error B3MultiFormat, B3SingleFormat, ) from opentelemetry.propagators.textmap import DefaultGetter from opentelemetry.sdk import trace from opentelemetry.sdk.trace import id_generator from opentelemetry.trace.propagation import _SPAN_KEY def get_child_parent_new_carrier(old_carrier, propagator): ctx = propagator.extract(old_carrier) parent_span_context = trace_api.get_current_span(ctx).get_span_context() parent = trace._Span("parent", parent_span_context) child = trace._Span( "child", trace_api.SpanContext( parent_span_context.trace_id, id_generator.RandomIdGenerator().generate_span_id(), is_remote=False, trace_flags=parent_span_context.trace_flags, trace_state=parent_span_context.trace_state, ), parent=parent.get_span_context(), ) new_carrier = {} ctx = trace_api.set_span_in_context(child) propagator.inject(new_carrier, context=ctx) return child, parent, new_carrier class AbstractB3FormatTestCase: # pylint: disable=too-many-public-methods,no-member,invalid-name @classmethod def setUpClass(cls): generator = id_generator.RandomIdGenerator() cls.serialized_trace_id = trace_api.format_trace_id( generator.generate_trace_id() ) cls.serialized_span_id = trace_api.format_span_id( generator.generate_span_id() ) def setUp(self) -> None: tracer_provider = trace.TracerProvider() patcher = unittest.mock.patch.object( trace_api, "get_tracer_provider", return_value=tracer_provider ) patcher.start() self.addCleanup(patcher.stop) @classmethod def get_child_parent_new_carrier(cls, old_carrier): return get_child_parent_new_carrier(old_carrier, cls.get_propagator()) @classmethod @abstractmethod def get_propagator(cls): pass @classmethod @abstractmethod def get_trace_id(cls, carrier): pass def assertSampled(self, carrier): pass def assertNotSampled(self, carrier): pass def test_extract_multi_header(self): """Test the extraction of B3 headers.""" propagator = self.get_propagator() context = { propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.SAMPLED_KEY: "1", } child, parent, _ = self.get_child_parent_new_carrier(context) self.assertEqual( context[propagator.TRACE_ID_KEY], trace_api.format_trace_id(child.context.trace_id), ) self.assertEqual( context[propagator.SPAN_ID_KEY], trace_api.format_span_id(child.parent.span_id), ) self.assertTrue(parent.context.is_remote) self.assertTrue(parent.context.trace_flags.sampled) def test_extract_single_header(self): """Test the extraction from a single b3 header.""" propagator = self.get_propagator() child, parent, _ = self.get_child_parent_new_carrier( { propagator.SINGLE_HEADER_KEY: f"{self.serialized_trace_id}-{self.serialized_span_id}" } ) self.assertEqual( self.serialized_trace_id, trace_api.format_trace_id(child.context.trace_id), ) self.assertEqual( self.serialized_span_id, trace_api.format_span_id(child.parent.span_id), ) self.assertTrue(parent.context.is_remote) self.assertTrue(parent.context.trace_flags.sampled) child, parent, _ = self.get_child_parent_new_carrier( { propagator.SINGLE_HEADER_KEY: f"{self.serialized_trace_id}-{self.serialized_span_id}-1" } ) self.assertEqual( self.serialized_trace_id, trace_api.format_trace_id(child.context.trace_id), ) self.assertEqual( self.serialized_span_id, trace_api.format_span_id(child.parent.span_id), ) 
self.assertTrue(parent.context.is_remote) self.assertTrue(parent.context.trace_flags.sampled) def test_extract_header_precedence(self): """A single b3 header should take precedence over multiple headers. """ propagator = self.get_propagator() single_header_trace_id = self.serialized_trace_id[:-3] + "123" _, _, new_carrier = self.get_child_parent_new_carrier( { propagator.SINGLE_HEADER_KEY: f"{single_header_trace_id}-{self.serialized_span_id}", propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.SAMPLED_KEY: "1", } ) self.assertEqual( self.get_trace_id(new_carrier), single_header_trace_id ) def test_enabled_sampling(self): """Test b3 sample key variants that turn on sampling.""" propagator = self.get_propagator() for variant in ["1", "True", "true", "d"]: _, _, new_carrier = self.get_child_parent_new_carrier( { propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.SAMPLED_KEY: variant, } ) self.assertSampled(new_carrier) def test_disabled_sampling(self): """Test b3 sample key variants that turn off sampling.""" propagator = self.get_propagator() for variant in ["0", "False", "false", None]: _, _, new_carrier = self.get_child_parent_new_carrier( { propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.SAMPLED_KEY: variant, } ) self.assertNotSampled(new_carrier) def test_flags(self): """x-b3-flags set to "1" should result in propagation.""" propagator = self.get_propagator() _, _, new_carrier = self.get_child_parent_new_carrier( { propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.FLAGS_KEY: "1", } ) self.assertSampled(new_carrier) def test_flags_and_sampling(self): """Propagate if both the b3 flags and sampled keys are set.""" propagator = self.get_propagator() _, _, new_carrier = self.get_child_parent_new_carrier( { propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.SAMPLED_KEY: "1", propagator.FLAGS_KEY: "1", } ) self.assertSampled(new_carrier) def test_derived_ctx_is_returned_for_success(self): """Ensure returned context is derived from the given context.""" old_ctx = Context({"k1": "v1"}) propagator = self.get_propagator() new_ctx = propagator.extract( { propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.FLAGS_KEY: "1", }, old_ctx, ) self.assertIn(_SPAN_KEY, new_ctx) for key, value in old_ctx.items(): # pylint:disable=no-member self.assertIn(key, new_ctx) # pylint:disable=unsubscriptable-object self.assertEqual(new_ctx[key], value) def test_derived_ctx_is_returned_for_failure(self): """Ensure returned context is derived from the given context.""" old_ctx = Context({"k2": "v2"}) new_ctx = self.get_propagator().extract({}, old_ctx) self.assertNotIn(_SPAN_KEY, new_ctx) for key, value in old_ctx.items(): # pylint:disable=no-member self.assertIn(key, new_ctx) # pylint:disable=unsubscriptable-object self.assertEqual(new_ctx[key], value) def test_64bit_trace_id(self): """64 bit trace ids should be padded to 128 bit trace ids.""" propagator = self.get_propagator() trace_id_64_bit = self.serialized_trace_id[:16] _, _, new_carrier = self.get_child_parent_new_carrier( { propagator.TRACE_ID_KEY: trace_id_64_bit, propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.FLAGS_KEY: "1", }, ) self.assertEqual( self.get_trace_id(new_carrier), "0" * 16 + trace_id_64_bit ) def 
test_extract_invalid_single_header_to_explicit_ctx(self): """Given unparsable header, do not modify context""" old_ctx = Context({"k1": "v1"}) propagator = self.get_propagator() carrier = {propagator.SINGLE_HEADER_KEY: "0-1-2-3-4-5-6-7"} new_ctx = propagator.extract(carrier, old_ctx) self.assertDictEqual(new_ctx, old_ctx) def test_extract_invalid_single_header_to_implicit_ctx(self): propagator = self.get_propagator() carrier = {propagator.SINGLE_HEADER_KEY: "0-1-2-3-4-5-6-7"} new_ctx = propagator.extract(carrier) self.assertDictEqual(Context(), new_ctx) def test_extract_missing_trace_id_to_explicit_ctx(self): """Given no trace ID, do not modify context""" old_ctx = Context({"k1": "v1"}) propagator = self.get_propagator() carrier = { propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.FLAGS_KEY: "1", } new_ctx = propagator.extract(carrier, old_ctx) self.assertDictEqual(new_ctx, old_ctx) def test_extract_missing_trace_id_to_implicit_ctx(self): propagator = self.get_propagator() carrier = { propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.FLAGS_KEY: "1", } new_ctx = propagator.extract(carrier) self.assertDictEqual(Context(), new_ctx) def test_extract_invalid_trace_id_to_explicit_ctx(self): """Given invalid trace ID, do not modify context""" old_ctx = Context({"k1": "v1"}) propagator = self.get_propagator() carrier = { propagator.TRACE_ID_KEY: "abc123", propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.FLAGS_KEY: "1", } new_ctx = propagator.extract(carrier, old_ctx) self.assertDictEqual(new_ctx, old_ctx) def test_extract_invalid_trace_id_to_implicit_ctx(self): propagator = self.get_propagator() carrier = { propagator.TRACE_ID_KEY: "abc123", propagator.SPAN_ID_KEY: self.serialized_span_id, propagator.FLAGS_KEY: "1", } new_ctx = propagator.extract(carrier) self.assertDictEqual(Context(), new_ctx) def test_extract_invalid_span_id_to_explicit_ctx(self): """Given invalid span ID, do not modify context""" old_ctx = Context({"k1": "v1"}) propagator = self.get_propagator() carrier = { propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.SPAN_ID_KEY: "abc123", propagator.FLAGS_KEY: "1", } new_ctx = propagator.extract(carrier, old_ctx) self.assertDictEqual(new_ctx, old_ctx) def test_extract_invalid_span_id_to_implicit_ctx(self): propagator = self.get_propagator() carrier = { propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.SPAN_ID_KEY: "abc123", propagator.FLAGS_KEY: "1", } new_ctx = propagator.extract(carrier) self.assertDictEqual(Context(), new_ctx) def test_extract_missing_span_id_to_explicit_ctx(self): """Given no span ID, do not modify context""" old_ctx = Context({"k1": "v1"}) propagator = self.get_propagator() carrier = { propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.FLAGS_KEY: "1", } new_ctx = propagator.extract(carrier, old_ctx) self.assertDictEqual(new_ctx, old_ctx) def test_extract_missing_span_id_to_implicit_ctx(self): propagator = self.get_propagator() carrier = { propagator.TRACE_ID_KEY: self.serialized_trace_id, propagator.FLAGS_KEY: "1", } new_ctx = propagator.extract(carrier) self.assertDictEqual(Context(), new_ctx) def test_extract_empty_carrier_to_explicit_ctx(self): """Given no headers at all, do not modify context""" old_ctx = Context({"k1": "v1"}) carrier = {} new_ctx = self.get_propagator().extract(carrier, old_ctx) self.assertDictEqual(new_ctx, old_ctx) def test_extract_empty_carrier_to_implicit_ctx(self): new_ctx = self.get_propagator().extract({}) self.assertDictEqual(Context(), new_ctx) def 
test_inject_empty_context(self): """If the current context has no span, don't add headers""" new_carrier = {} self.get_propagator().inject(new_carrier, get_current()) assert len(new_carrier) == 0 def test_default_span(self): """Make sure propagator does not crash when working with NonRecordingSpan""" class CarrierGetter(DefaultGetter): def get(self, carrier, key): return carrier.get(key, None) propagator = self.get_propagator() ctx = propagator.extract({}, getter=CarrierGetter()) propagator.inject({}, context=ctx) def test_fields(self): """Make sure the fields attribute returns the fields used in inject""" propagator = self.get_propagator() tracer = trace.TracerProvider().get_tracer("sdk_tracer_provider") mock_setter = Mock() with tracer.start_as_current_span("parent"): with tracer.start_as_current_span("child"): propagator.inject({}, setter=mock_setter) inject_fields = set() for call in mock_setter.mock_calls: inject_fields.add(call[1][1]) self.assertEqual(propagator.fields, inject_fields) def test_extract_none_context(self): """Given no trace ID, do not modify context""" old_ctx = None carrier = {} new_ctx = self.get_propagator().extract(carrier, old_ctx) self.assertDictEqual(Context(), new_ctx) class TestB3MultiFormat(AbstractB3FormatTestCase, unittest.TestCase): @classmethod def get_propagator(cls): return B3MultiFormat() @classmethod def get_trace_id(cls, carrier): return carrier[cls.get_propagator().TRACE_ID_KEY] def assertSampled(self, carrier): self.assertEqual(carrier[self.get_propagator().SAMPLED_KEY], "1") def assertNotSampled(self, carrier): self.assertEqual(carrier[self.get_propagator().SAMPLED_KEY], "0") class TestB3SingleFormat(AbstractB3FormatTestCase, unittest.TestCase): @classmethod def get_propagator(cls): return B3SingleFormat() @classmethod def get_trace_id(cls, carrier): return carrier[cls.get_propagator().SINGLE_HEADER_KEY].split("-")[0] def assertSampled(self, carrier): self.assertEqual( carrier[self.get_propagator().SINGLE_HEADER_KEY].split("-")[2], "1" ) def assertNotSampled(self, carrier): self.assertEqual( carrier[self.get_propagator().SINGLE_HEADER_KEY].split("-")[2], "0" ) python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/000077500000000000000000000000001511654350100265025ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/LICENSE000066400000000000000000000261351511654350100275160ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/README.rst000066400000000000000000000010221511654350100301640ustar00rootroot00000000000000OpenTelemetry Jaeger Propagator =============================== |pypi| .. 
|pypi| image:: https://badge.fury.io/py/opentelemetry-propagator-jaeger.svg :target: https://pypi.org/project/opentelemetry-propagator-jaeger/ This library provides a propagator for the Jaeger format. Installation ------------ :: pip install opentelemetry-propagator-jaeger References ---------- * `OpenTelemetry <https://opentelemetry.io/>`_ * `Jaeger format <https://www.jaegertracing.io/docs/1.19/client-libraries/#propagation-format>`_ python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/py.typed000066400000000000000000000000001511654350100301700ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/pyproject.toml000066400000000000000000000026141511654350100314210ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-propagator-jaeger" dynamic = ["version"] description = "OpenTelemetry Jaeger Propagator" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Framework :: OpenTelemetry", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "opentelemetry-api ~= 1.3", ] [project.entry-points.opentelemetry_propagator] jaeger = "opentelemetry.propagators.jaeger:JaegerPropagator" [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/propagator/opentelemetry-propagator-jaeger" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/propagators/jaeger/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/src/000077500000000000000000000000001511654350100272715ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/src/opentelemetry/000077500000000000000000000000001511654350100321655ustar00rootroot00000000000000propagators/000077500000000000000000000000001511654350100344475ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/src/opentelemetryjaeger/000077500000000000000000000000001511654350100357045ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/src/opentelemetry/propagators__init__.py000066400000000000000000000122351511654350100400200ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/src/opentelemetry/propagators/jaeger# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
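# Wire format produced and consumed by this module (see
# _format_uber_trace_id and _parse_trace_id_header below):
#
#   uber-trace-id: {trace-id}:{span-id}:{parent-span-id}:{flags}
#
# where each field is hex-encoded and bit 0x01 of the flags field marks the
# span as sampled. An illustrative (made-up) header value:
#
#   uber-trace-id: 000000000000000000000000deadbeef:00000000deadbef0:0000000000000000:03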
import typing import urllib.parse from opentelemetry import baggage, trace from opentelemetry.context import Context from opentelemetry.propagators.textmap import ( CarrierT, Getter, Setter, TextMapPropagator, default_getter, default_setter, ) from opentelemetry.trace import format_span_id, format_trace_id class JaegerPropagator(TextMapPropagator): """Propagator for the Jaeger format. See: https://www.jaegertracing.io/docs/1.19/client-libraries/#propagation-format """ TRACE_ID_KEY = "uber-trace-id" BAGGAGE_PREFIX = "uberctx-" DEBUG_FLAG = 0x02 def extract( self, carrier: CarrierT, context: typing.Optional[Context] = None, getter: Getter = default_getter, ) -> Context: if context is None: context = Context() header = getter.get(carrier, self.TRACE_ID_KEY) if not header: return context context = self._extract_baggage(getter, carrier, context) trace_id, span_id, flags = _parse_trace_id_header(header) if ( trace_id == trace.INVALID_TRACE_ID or span_id == trace.INVALID_SPAN_ID ): return context span = trace.NonRecordingSpan( trace.SpanContext( trace_id=trace_id, span_id=span_id, is_remote=True, trace_flags=trace.TraceFlags(flags & trace.TraceFlags.SAMPLED), ) ) return trace.set_span_in_context(span, context) def inject( self, carrier: CarrierT, context: typing.Optional[Context] = None, setter: Setter = default_setter, ) -> None: span = trace.get_current_span(context=context) span_context = span.get_span_context() if span_context == trace.INVALID_SPAN_CONTEXT: return # Non-recording spans do not have a parent span_parent_id = ( span.parent.span_id if span.is_recording() and span.parent else 0 ) trace_flags = span_context.trace_flags if trace_flags.sampled: trace_flags |= self.DEBUG_FLAG # set span identity setter.set( carrier, self.TRACE_ID_KEY, _format_uber_trace_id( span_context.trace_id, span_context.span_id, span_parent_id, trace_flags, ), ) # set span baggage, if any baggage_entries = baggage.get_all(context=context) if not baggage_entries: return for key, value in baggage_entries.items(): baggage_key = self.BAGGAGE_PREFIX + key setter.set(carrier, baggage_key, urllib.parse.quote(str(value))) @property def fields(self) -> typing.Set[str]: return {self.TRACE_ID_KEY} def _extract_baggage(self, getter, carrier, context): baggage_keys = [ key for key in getter.keys(carrier) if key.startswith(self.BAGGAGE_PREFIX) ] for key in baggage_keys: value = _extract_first_element(getter.get(carrier, key)) context = baggage.set_baggage( key.replace(self.BAGGAGE_PREFIX, ""), urllib.parse.unquote(value).strip(), context=context, ) return context def _format_uber_trace_id(trace_id, span_id, parent_span_id, flags): return f"{format_trace_id(trace_id)}:{format_span_id(span_id)}:{format_span_id(parent_span_id)}:{flags:02x}" def _extract_first_element( items: typing.Iterable[CarrierT], ) -> typing.Optional[CarrierT]: if items is None: return None return next(iter(items), None) def _parse_trace_id_header( items: typing.Iterable[CarrierT], ) -> typing.Tuple[int, int, int]: invalid_header_result = (trace.INVALID_TRACE_ID, trace.INVALID_SPAN_ID, 0) header = _extract_first_element(items) if header is None: return invalid_header_result fields = header.split(":") if len(fields) != 4: return invalid_header_result trace_id_str, span_id_str, _parent_id_str, flags_str = fields flags = _int_from_hex_str(flags_str, None) if flags is None: return invalid_header_result trace_id = _int_from_hex_str(trace_id_str, trace.INVALID_TRACE_ID) span_id = _int_from_hex_str(span_id_str, trace.INVALID_SPAN_ID) return trace_id, span_id, flags def 
_int_from_hex_str( identifier: str, default: typing.Optional[int] ) -> typing.Optional[int]: try: return int(identifier, 16) except ValueError: return default py.typed000066400000000000000000000000001511654350100373710ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/src/opentelemetry/propagators/jaegerversion/000077500000000000000000000000001511654350100373715ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/src/opentelemetry/propagators/jaeger__init__.py000066400000000000000000000011401511654350100414760ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/src/opentelemetry/propagators/jaeger/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "1.39.1" python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/test-requirements.txt000066400000000000000000000005331511654350100327440ustar00rootroot00000000000000asgiref==3.7.2 importlib-metadata==6.11.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.5.0 py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e opentelemetry-sdk -e opentelemetry-semantic-conventions -e tests/opentelemetry-test-utils -e propagator/opentelemetry-propagator-jaeger python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/tests/000077500000000000000000000000001511654350100276445ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/tests/__init__.py000066400000000000000000000011101511654350100317460ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. test_jaeger_propagator.py000066400000000000000000000234361511654350100347010ustar00rootroot00000000000000python-opentelemetry-1.39.1/propagator/opentelemetry-propagator-jaeger/tests# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
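# Minimal usage sketch of the propagator under test (the header value is a
# hypothetical placeholder; extract() and inject() are the round trip these
# tests exercise):
#
#   propagator = jaeger.JaegerPropagator()
#   ctx = propagator.extract({"uber-trace-id": "<trace>:<span>:<parent>:<flags>"})
#   carrier = {}
#   propagator.inject(carrier, context=ctx)  # writes uber-trace-id back out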
from unittest.mock import Mock import opentelemetry.trace as trace_api from opentelemetry import baggage from opentelemetry.baggage import _BAGGAGE_KEY from opentelemetry.context import Context from opentelemetry.propagators import ( # pylint: disable=no-name-in-module jaeger, ) from opentelemetry.sdk import trace from opentelemetry.sdk.trace import id_generator from opentelemetry.test import TestCase FORMAT = jaeger.JaegerPropagator() def get_context_new_carrier(old_carrier, carrier_baggage=None): ctx = FORMAT.extract(old_carrier) if carrier_baggage: for key, value in carrier_baggage.items(): ctx = baggage.set_baggage(key, value, ctx) parent_span_context = trace_api.get_current_span(ctx).get_span_context() parent = trace._Span("parent", parent_span_context) child = trace._Span( "child", trace_api.SpanContext( parent_span_context.trace_id, id_generator.RandomIdGenerator().generate_span_id(), is_remote=False, trace_flags=parent_span_context.trace_flags, trace_state=parent_span_context.trace_state, ), parent=parent.get_span_context(), ) new_carrier = {} ctx = trace_api.set_span_in_context(child, ctx) FORMAT.inject(new_carrier, context=ctx) return ctx, new_carrier class TestJaegerPropagator(TestCase): @classmethod def setUpClass(cls): generator = id_generator.RandomIdGenerator() cls.trace_id = generator.generate_trace_id() cls.span_id = generator.generate_span_id() cls.parent_span_id = generator.generate_span_id() cls.serialized_uber_trace_id = jaeger._format_uber_trace_id( # pylint: disable=protected-access cls.trace_id, cls.span_id, cls.parent_span_id, 11 ) def test_extract_valid_span(self): old_carrier = {FORMAT.TRACE_ID_KEY: self.serialized_uber_trace_id} ctx = FORMAT.extract(old_carrier) span_context = trace_api.get_current_span(ctx).get_span_context() self.assertEqual(span_context.trace_id, self.trace_id) self.assertEqual(span_context.span_id, self.span_id) def test_missing_carrier(self): old_carrier = {} ctx = FORMAT.extract(old_carrier) span_context = trace_api.get_current_span(ctx).get_span_context() self.assertEqual(span_context.trace_id, trace_api.INVALID_TRACE_ID) self.assertEqual(span_context.span_id, trace_api.INVALID_SPAN_ID) def test_trace_id(self): old_carrier = {FORMAT.TRACE_ID_KEY: self.serialized_uber_trace_id} _, new_carrier = get_context_new_carrier(old_carrier) self.assertEqual( self.serialized_uber_trace_id.split(":", maxsplit=1)[0], new_carrier[FORMAT.TRACE_ID_KEY].split(":")[0], ) def test_parent_span_id(self): old_carrier = {FORMAT.TRACE_ID_KEY: self.serialized_uber_trace_id} _, new_carrier = get_context_new_carrier(old_carrier) span_id = self.serialized_uber_trace_id.split(":")[1] parent_span_id = new_carrier[FORMAT.TRACE_ID_KEY].split(":")[2] self.assertEqual(span_id, parent_span_id) def test_sampled_flag_set(self): old_carrier = {FORMAT.TRACE_ID_KEY: self.serialized_uber_trace_id} _, new_carrier = get_context_new_carrier(old_carrier) sample_flag_value = ( int(new_carrier[FORMAT.TRACE_ID_KEY].split(":")[3]) & 0x01 ) self.assertEqual(1, sample_flag_value) def test_debug_flag_set(self): old_carrier = {FORMAT.TRACE_ID_KEY: self.serialized_uber_trace_id} _, new_carrier = get_context_new_carrier(old_carrier) debug_flag_value = ( int(new_carrier[FORMAT.TRACE_ID_KEY].split(":")[3]) & FORMAT.DEBUG_FLAG ) self.assertEqual(FORMAT.DEBUG_FLAG, debug_flag_value) def test_sample_debug_flags_unset(self): uber_trace_id = jaeger._format_uber_trace_id( # pylint: disable=protected-access self.trace_id, self.span_id, self.parent_span_id, 0 ) old_carrier = {FORMAT.TRACE_ID_KEY: 
uber_trace_id} _, new_carrier = get_context_new_carrier(old_carrier) flags = int(new_carrier[FORMAT.TRACE_ID_KEY].split(":")[3]) sample_flag_value = flags & 0x01 debug_flag_value = flags & FORMAT.DEBUG_FLAG self.assertEqual(0, sample_flag_value) self.assertEqual(0, debug_flag_value) def test_baggage(self): old_carrier = {FORMAT.TRACE_ID_KEY: self.serialized_uber_trace_id} input_baggage = {"key1": "value1"} _, new_carrier = get_context_new_carrier(old_carrier, input_baggage) ctx = FORMAT.extract(new_carrier) self.assertDictEqual(input_baggage, ctx[_BAGGAGE_KEY]) def test_non_string_baggage(self): old_carrier = {FORMAT.TRACE_ID_KEY: self.serialized_uber_trace_id} input_baggage = {"key1": 1, "key2": True} formatted_baggage = {"key1": "1", "key2": "True"} _, new_carrier = get_context_new_carrier(old_carrier, input_baggage) ctx = FORMAT.extract(new_carrier) self.assertDictEqual(formatted_baggage, ctx[_BAGGAGE_KEY]) def test_extract_invalid_uber_trace_id(self): old_carrier = { "uber-trace-id": "000000000000000000000000deadbeef:00000000deadbef0:00", "uberctx-key1": "value1", } formatted_baggage = {"key1": "value1"} context = FORMAT.extract(old_carrier) span_context = trace_api.get_current_span(context).get_span_context() self.assertEqual(span_context.span_id, trace_api.INVALID_SPAN_ID) self.assertDictEqual(formatted_baggage, context[_BAGGAGE_KEY]) def test_extract_invalid_trace_id(self): old_carrier = { "uber-trace-id": "00000000000000000000000000000000:00000000deadbef0:00:00", "uberctx-key1": "value1", } formatted_baggage = {"key1": "value1"} context = FORMAT.extract(old_carrier) span_context = trace_api.get_current_span(context).get_span_context() self.assertEqual(span_context.trace_id, trace_api.INVALID_TRACE_ID) self.assertDictEqual(formatted_baggage, context[_BAGGAGE_KEY]) def test_extract_invalid_span_id(self): old_carrier = { "uber-trace-id": "000000000000000000000000deadbeef:0000000000000000:00:00", "uberctx-key1": "value1", } formatted_baggage = {"key1": "value1"} context = FORMAT.extract(old_carrier) span_context = trace_api.get_current_span(context).get_span_context() self.assertEqual(span_context.span_id, trace_api.INVALID_SPAN_ID) self.assertDictEqual(formatted_baggage, context[_BAGGAGE_KEY]) def test_fields(self): tracer = trace.TracerProvider().get_tracer("sdk_tracer_provider") mock_setter = Mock() with tracer.start_as_current_span("parent"): with tracer.start_as_current_span("child"): FORMAT.inject({}, setter=mock_setter) inject_fields = set() for call in mock_setter.mock_calls: inject_fields.add(call[1][1]) self.assertEqual(FORMAT.fields, inject_fields) def test_extract_no_trace_id_to_explicit_ctx(self): carrier = {} orig_ctx = Context({"k1": "v1"}) ctx = FORMAT.extract(carrier, orig_ctx) self.assertDictEqual(orig_ctx, ctx) def test_extract_no_trace_id_to_implicit_ctx(self): carrier = {} ctx = FORMAT.extract(carrier) self.assertDictEqual(Context(), ctx) def test_extract_invalid_uber_trace_id_header_to_explicit_ctx(self): trace_id_headers = [ "000000000000000000000000deadbeef:00000000deadbef0:00", "00000000000000000000000000000000:00000000deadbef0:00:00", "000000000000000000000000deadbeef:0000000000000000:00:00", "000000000000000000000000deadbeef:0000000000000000:00:xyz", ] for trace_id_header in trace_id_headers: with self.subTest(trace_id_header=trace_id_header): carrier = {"uber-trace-id": trace_id_header} orig_ctx = Context({"k1": "v1"}) ctx = FORMAT.extract(carrier, orig_ctx) self.assertDictEqual(orig_ctx, ctx) def 
test_extract_invalid_uber_trace_id_header_to_implicit_ctx(self): trace_id_headers = [ "000000000000000000000000deadbeef:00000000deadbef0:00", "00000000000000000000000000000000:00000000deadbef0:00:00", "000000000000000000000000deadbeef:0000000000000000:00:00", "000000000000000000000000deadbeef:0000000000000000:00:xyz", ] for trace_id_header in trace_id_headers: with self.subTest(trace_id_header=trace_id_header): carrier = {"uber-trace-id": trace_id_header} ctx = FORMAT.extract(carrier) self.assertDictEqual(Context(), ctx) def test_non_recording_span_does_not_crash(self): """Make sure propagator does not crash when working with NonRecordingSpan""" mock_setter = Mock() span = trace_api.NonRecordingSpan(trace_api.SpanContext(1, 1, True)) with trace_api.use_span(span, end_on_exit=True): with self.assertNotRaises(Exception): FORMAT.inject({}, setter=mock_setter) python-opentelemetry-1.39.1/pyproject.toml000066400000000000000000000066451511654350100207500ustar00rootroot00000000000000[project] name = "opentelemetry-python" version = "0.0.0" # This is not used. requires-python = ">=3.9" dependencies = [ "opentelemetry-api", "opentelemetry-sdk", "opentelemetry-semantic-conventions", "opentelemetry-proto", "opentelemetry-test-utils", "opentelemetry-exporter-otlp-proto-grpc", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-exporter-otlp-proto-common", "opentelemetry-exporter-zipkin-json", "opentelemetry-exporter-prometheus", "opentelemetry-propagator-jaeger", "opentelemetry-propagator-b3", ] # https://docs.astral.sh/uv/reference/settings/ [tool.uv] package = false # https://docs.astral.sh/uv/reference/settings/#package required-version = ">=0.6.0" [tool.uv.sources] opentelemetry-api = { workspace = true} opentelemetry-sdk = { workspace = true } opentelemetry-proto = { workspace = true } opentelemetry-semantic-conventions = { workspace = true } opentelemetry-test-utils = { workspace = true } opentelemetry-exporter-otlp-proto-grpc = { workspace = true } opentelemetry-exporter-otlp-proto-http = { workspace = true } opentelemetry-exporter-otlp-proto-common = { workspace = true } opentelemetry-exporter-zipkin-json = { workspace = true } opentelemetry-exporter-prometheus = {workspace = true } opentelemetry-propagator-jaeger = { workspace = true } opentelemetry-propagator-b3 = { workspace = true } [tool.uv.workspace] members = [ "opentelemetry-api", "opentelemetry-sdk", "opentelemetry-semantic-conventions", "opentelemetry-proto", "exporter/*", "propagator/*", "tests/opentelemetry-test-utils", ] exclude = [ "exporter/opentelemetry-exporter-opencensus", "exporter/opentelemetry-exporter-zipkin", "exporter/opentelemetry-exporter-zipkin-proto-http", ] [tool.pytest.ini_options] addopts = "-rs -v" log_cli = true [tool.ruff] # https://docs.astral.sh/ruff/configuration/ target-version = "py38" line-length = 79 extend-exclude = [ "*_pb2*.py*", ] output-format = "concise" [tool.ruff.lint] # https://docs.astral.sh/ruff/linter/#rule-selection # pylint: https://github.com/astral-sh/ruff/issues/970 select = [ "I", # isort "F", # pyflakes "E", # pycodestyle errors "W", # pycodestyle warnings "PLC", # pylint convention "PLE", # pylint error "Q", # flake8-quotes "G", # https://docs.astral.sh/ruff/rules/#flake8-logging-format-g ] ignore = [ "E501", # line-too-long ] [tool.ruff.lint.per-file-ignores] "docs/**/*.*" = ["PLE"] [tool.ruff.lint.isort] known-third-party = [ "psutil", "pytest", "redis", "redis_opentracing", "opencensus", ] known-first-party = ["opentelemetry", "opentelemetry_example_app"] [tool.pyright] 
typeCheckingMode = "standard" pythonVersion = "3.9" include = [ "opentelemetry-semantic-conventions", "opentelemetry-api", "opentelemetry-sdk", "exporter/opentelemetry-exporter-otlp-proto-grpc", ] exclude = [ "opentelemetry-sdk/tests", "opentelemetry-sdk/src/opentelemetry/sdk/_events", "opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/", "opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py", "opentelemetry-sdk/benchmarks", "exporter/opentelemetry-exporter-otlp-proto-grpc/tests", ] # When packages are correctly typed add them to the strict list strict = [ "opentelemetry-semantic-conventions", "opentelemetry-sdk/src/opentelemetry/sdk/environment_variables", "opentelemetry-sdk/src/opentelemetry/sdk/resources", ] [dependency-groups] dev = [ "tox", "tox-uv>=1", "pre-commit", ] python-opentelemetry-1.39.1/rationale.md000066400000000000000000000074521511654350100203300ustar00rootroot00000000000000# OpenTelemetry Rationale When creating a library, designs and decisions are often made that get lost over time. This document tries to collect information on design decisions to answer common questions that may come up when you explore the SDK. ## Versioning and Releasing This document describes the versioning and stability policy of components shipped from this repository, as per the [OpenTelemetry versioning and stability specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/versioning-and-stability.md). The OpenTelemetry implementations, the OpenTelemetry Spec itself, and this repo follow [SemVer V2](https://semver.org/spec/v2.0.0.html) guidelines. This means that, for any stable packages released from this repo, all public APIs will remain [backward compatible](https://www.python.org/dev/peps/pep-0387/), unless a major version bump occurs. This applies to the API and SDK, as well as the Exporters, Instrumentation etc. shipped from this repo. For example, users can take a dependency on version 1.0.0 of any package, with the assurance that all future releases until 2.0.0 will be backward compatible. ## Goals ### API Stability Once the API for a given signal (spans, logs, metrics, baggage) has been officially released, that API module will function with any SDK that has the same major version, and equal or greater minor or patch version. For example, libraries that are instrumented with `opentelemetry-api 1.0.1` will function with SDK library `opentelemetry-sdk 1.11.33` or `opentelemetry-sdk 1.3.4`. ### SDK Stability Public portions of the SDK (constructors, configuration, end-user interfaces) must remain backwards compatible. Internal interfaces are allowed to break. ## Core components Core components refer to the set of components which are required as per the spec. This includes the API, the SDK, propagators (B3 and Jaeger), and the exporters which are required by the specification. These exporters are OTLP and Zipkin. ## Mature or stable Signals Modules for mature (i.e. released) signals will be found in the latest versions of the corresponding packages of the core components. The version numbers of these will have no suffix appended, indicating they are stable. For example, the package `opentelemetry-api` v1.x.y will be considered stable. ## Pre-releases Pre-release packages are denoted by appending identifiers such as -Alpha, -Beta, and -RC. There are no API guarantees in pre-releases. Each release can contain breaking changes, and functionality may be removed. 
In general, an RC pre-release is more stable than a Beta release, which is more stable than an Alpha release. ### Immature or experimental signals Modules for experimental signals will be released in the same packages as the core components, but prefixed with `_` to indicate that they are unstable and subject to change. NO STABILITY GUARANTEES ARE MADE. ## Examples Purely for illustration purposes, not intended to represent actual releases: #### V1.0.0 Release (tracing, baggage, propagators, context) - `opentelemetry-api` 1.0.0 - Contains APIs for tracing, baggage, propagators, context - `opentelemetry-sdk` 1.0.0 - Contains SDK components for tracing, baggage, propagators, and context #### V1.15.0 Release (with metrics) - `opentelemetry-api` 1.15.0 - Contains APIs for tracing, baggage, propagators, context, and metrics - `opentelemetry-sdk` 1.15.0 - Contains SDK components for tracing, baggage, propagators, context and metrics ##### Contains the following pre-release packages - `opentelemetry-api` 1.x.yrc1 - Contains the experimental public API for logging plus other unstable features. There are no stability guarantees. - `opentelemetry-sdk` 1.x.yrc1 - Contains the experimental public SDK for logging plus other unstable features. There are no stability guarantees. python-opentelemetry-1.39.1/scripts/000077500000000000000000000000001511654350100175105ustar00rootroot00000000000000python-opentelemetry-1.39.1/scripts/add_required_checks.py000066400000000000000000000031121511654350100240270ustar00rootroot00000000000000# This script is to be used by maintainers by running it locally. from json import dumps from os import environ from requests import put from yaml import safe_load job_names = ["EasyCLA"] # Check that the files below are all the workflow YAML files that should be # considered. for yml_file_name in [ "test_0", "test_1", "misc_0", "lint_0", "contrib_0", "check-links", ]: with open(f"../.github/workflows/{yml_file_name}.yml") as yml_file: job_names.extend( [job["name"] for job in safe_load(yml_file)["jobs"].values()] ) owner = "open-telemetry" repo = "opentelemetry-python" branch = "main" response = put( ( f"https://api.github.com/repos/{owner}/{repo}/branches/{branch}/" "protection/required_status_checks/contexts" ), headers={ "Accept": "application/vnd.github.v3+json", # The token has to be created in Github, and exported to the # environment variable below. When creating the token, the resource # owner must be open-telemetry, the access must be for the repo above, # and read and write permissions must be granted for administration # permissions and read permissions must be granted for metadata # permissions. "Authorization": f"token {environ.get('REQUIRED_CHECKS_TOKEN')}", }, data=dumps({"contexts": job_names}), ) if response.status_code == 200: print(response.content) else: print( "Failed to update branch protection settings. " f"Status code: {response.status_code}" ) print(response.json()) python-opentelemetry-1.39.1/scripts/build.sh000077500000000000000000000015001511654350100211420ustar00rootroot00000000000000#!/bin/sh # This script builds wheels for the API, SDK, and extension packages in the # dist/ dir, to be uploaded to PyPI. 
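# Typical invocation (a sketch; the script resolves its own location, so it
# can be run from any working directory):
#
#   ./scripts/build.sh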
set -ev # Get the latest versions of packaging tools python3 -m pip install --upgrade pip build setuptools wheel BASEDIR=$(dirname "$(readlink -f "$(dirname $0)")") DISTDIR=dist ( cd $BASEDIR mkdir -p $DISTDIR rm -rf ${DISTDIR:?}/* for d in opentelemetry-api/ opentelemetry-sdk/ opentelemetry-proto/ opentelemetry-semantic-conventions/ exporter/*/ shim/*/ propagator/*/ tests/opentelemetry-test-utils/; do ( echo "building $d" cd "$d" # Some ext directories (such as docker tests) are not intended to be # packaged. Verify the intent by looking for a pyproject.toml. if [ -f pyproject.toml ]; then python3 -m build --outdir "$BASEDIR/dist/" fi ) done ) python-opentelemetry-1.39.1/scripts/check_for_valid_readme.py000066400000000000000000000024601511654350100245030ustar00rootroot00000000000000"""Test script to check given paths for valid README.rst files.""" import argparse import sys from pathlib import Path import readme_renderer.rst def is_valid_rst(path): """Checks if RST can be rendered on PyPI.""" with open(path, encoding="utf-8") as readme_file: markup = readme_file.read() return readme_renderer.rst.render(markup, stream=sys.stderr) is not None def parse_args(): parser = argparse.ArgumentParser( description="Checks README.rst file in path for syntax errors." ) parser.add_argument( "paths", nargs="+", help="paths containing a README.rst to test" ) parser.add_argument("-v", "--verbose", action="store_true") return parser.parse_args() def main(): args = parse_args() error = False for path in map(Path, args.paths): readme = path / "README.rst" try: if not is_valid_rst(readme): error = True print("FAILED: RST syntax errors in", readme) continue except FileNotFoundError: error = True print("FAILED: README.rst not found in", path) continue if args.verbose: print("PASSED:", readme) if error: sys.exit(1) print("All clear.") if __name__ == "__main__": main() python-opentelemetry-1.39.1/scripts/coverage.sh000077500000000000000000000020031511654350100216350ustar00rootroot00000000000000#!/bin/bash set -e function cov { if [ ${TOX_ENV_NAME:0:4} == "py34" ] then pytest \ --ignore-glob=instrumentation/opentelemetry-instrumentation-opentracing-shim/tests/testbed/* \ --cov ${1} \ --cov-append \ --cov-branch \ --cov-report='' \ ${1} else pytest \ --cov ${1} \ --cov-append \ --cov-branch \ --cov-report='' \ ${1} fi } coverage erase cov opentelemetry-api cov opentelemetry-sdk cov exporter/opentelemetry-exporter-datadog cov instrumentation/opentelemetry-instrumentation-flask cov instrumentation/opentelemetry-instrumentation-requests cov instrumentation/opentelemetry-instrumentation-opentracing-shim cov util/opentelemetry-util-http cov exporter/opentelemetry-exporter-zipkin cov instrumentation/opentelemetry-instrumentation-aiohttp-client cov instrumentation/opentelemetry-instrumentation-asgi coverage report --show-missing coverage xml python-opentelemetry-1.39.1/scripts/eachdist.py000077500000000000000000000546541511654350100216670ustar00rootroot00000000000000#!/usr/bin/env python3 import argparse import os import re import shlex import shutil import subprocess import sys from configparser import ConfigParser from inspect import cleandoc from itertools import chain from os.path import basename from pathlib import Path, PurePath from toml import load DEFAULT_ALLSEP = " " DEFAULT_ALLFMT = "{rel}" def unique(elems): seen = set() for elem in elems: if elem not in seen: yield elem seen.add(elem) subprocess_run = subprocess.run def extraargs_help(calledcmd): return cleandoc( f""" Additional arguments to pass on to 
{calledcmd}. This is collected from any trailing arguments passed to `%(prog)s`. Use an initial `--` to separate them from regular arguments. """ ) def parse_args(args=None): parser = argparse.ArgumentParser(description="Development helper script.") parser.set_defaults(parser=parser) parser.add_argument( "--dry-run", action="store_true", help="Only display what would be done, don't actually do anything.", ) subparsers = parser.add_subparsers(metavar="COMMAND") subparsers.required = True excparser = subparsers.add_parser( "exec", help="Run a command for each or all targets.", formatter_class=argparse.RawTextHelpFormatter, description=cleandoc( """Run a command according to the `format` argument for each or all targets. This is an advanced command that is used internally by other commands. For example, to install all distributions in this repository editable, you could use: scripts/eachdist.py exec "python -m pip install -e {}" This will run pip for all distributions which is quite slow. It gets a bit faster if we only invoke pip once but with all the paths gathered together, which can be achieved by using `--all`: scripts/eachdist.py exec "python -m pip install {}" --all "-e {}" The sortfirst option in the DEFAULT section of eachdist.ini makes sure that dependencies are installed before their dependents. Search for usages of `parse_subargs` in the source code of this script to see more examples. This command first collects target paths and then executes commands according to `format` and `--all`. Target paths are initially all Python distribution root paths (as determined by the existence of pyproject.toml, etc. files). They are then augmented according to the section of the `PROJECT_ROOT/eachdist.ini` config file specified by the `--mode` option. The following config options are available (and processed in that order): - `extraroots`: List of project root-relative glob expressions. The resulting paths will be added. - `sortfirst`: List of glob expressions. Any matching paths will be put to the front of the path list, in the same order they appear in this option. If more than one glob matches, ordering is according to the first. - `subglob`: List of glob expressions. Each path added so far is removed and replaced with the result of all glob expressions relative to it (in order of the glob expressions). After all this, any duplicate paths are removed (the first occurrence remains). """ ), ) excparser.set_defaults(func=execute_args) excparser.add_argument( "format", help=cleandoc( """Format string for the command to execute. The available replacements depend on whether `--all` is specified. If `--all` was specified, there is only a single replacement, `{}`, that is replaced with the string that is generated from joining all targets formatted with `--all` to a single string with the value of `--allsep` as separator. If `--all` was not specified, the following replacements are available: - `{}`: the absolute path to the current target in POSIX format (with forward slashes) - `{rel}`: like `{}` but relative to the project root. - `{raw}`: the absolute path to the current target in native format (thus exactly the same as `{}` on Unix but with backslashes on Windows). - `{rawrel}`: like `{raw}` but relative to the project root. The resulting string is then split according to POSIX shell rules (so you can use quotation marks or backslashes to handle arguments containing spaces). The first token is the name of the executable to run, the remaining tokens are the arguments. 
Note that a shell is *not* involved by default. You can add bash/sh/cmd/powershell yourself to the format if you want. If `--all` was specified, the resulting command is simply executed once. Otherwise, the command is executed for each found target. In both cases, the project root is the working directory. """ ), ) excparser.add_argument( "--all", nargs="?", const=DEFAULT_ALLFMT, metavar="ALLFORMAT", help=cleandoc( """Instead of running the command for each target, join all target paths together to run a single command. This option optionally takes a format string to apply to each path. The available replacements are the ones that would be available for `format` if `--all` was not specified. Default ALLFORMAT if this flag is specified: `%(const)s`. """ ), ) excparser.add_argument( "--allsep", help=cleandoc( """Separator string for the strings resulting from `--all`. Only valid if `--all` is specified. """ ), ) excparser.add_argument( "--allowexitcode", type=int, action="append", default=[0], help=cleandoc( """The given command exit code is treated as success and does not abort execution. Can be specified multiple times. """ ), ) excparser.add_argument( "--mode", "-m", default="DEFAULT", help=cleandoc( """Section of config file to use for target selection configuration. See description of exec for available options.""" ), ) instparser = subparsers.add_parser( "install", help="Install all distributions." ) def setup_instparser(instparser): instparser.set_defaults(func=install_args) instparser.add_argument( "pipargs", nargs=argparse.REMAINDER, help=extraargs_help("pip") ) setup_instparser(instparser) instparser.add_argument("--editable", "-e", action="store_true") instparser.add_argument("--with-dev-deps", action="store_true") instparser.add_argument("--eager-upgrades", action="store_true") devparser = subparsers.add_parser( "develop", help="Install all distributions editable + dev dependencies.", ) setup_instparser(devparser) devparser.set_defaults( editable=True, with_dev_deps=True, eager_upgrades=True, ) lintparser = subparsers.add_parser( "lint", help="Lint everything, autofixing if possible." 
) lintparser.add_argument("--check-only", action="store_true") lintparser.set_defaults(func=lint_args) testparser = subparsers.add_parser( "test", help="Test everything (run pytest yourself for more complex operations).", ) testparser.set_defaults(func=test_args) testparser.add_argument( "pytestargs", nargs=argparse.REMAINDER, help=extraargs_help("pytest") ) releaseparser = subparsers.add_parser( "update_versions", help="Updates version numbers, used by maintainers and CI", ) releaseparser.set_defaults(func=release_args) releaseparser.add_argument("--versions", required=True) releaseparser.add_argument( "releaseargs", nargs=argparse.REMAINDER, help=extraargs_help("pytest") ) patchreleaseparser = subparsers.add_parser( "update_patch_versions", help="Updates version numbers during patch release, used by maintainers and CI", ) patchreleaseparser.set_defaults(func=patch_release_args) patchreleaseparser.add_argument("--stable_version", required=True) patchreleaseparser.add_argument("--unstable_version", required=True) patchreleaseparser.add_argument("--stable_version_prev", required=True) patchreleaseparser.add_argument("--unstable_version_prev", required=True) fmtparser = subparsers.add_parser( "format", help="Formats all source code with black and isort.", ) fmtparser.set_defaults(func=format_args) fmtparser.add_argument( "--path", required=False, help="Format only this path instead of entire repository", ) versionparser = subparsers.add_parser( "version", help="Get the version for a release", ) versionparser.set_defaults(func=version_args) versionparser.add_argument( "--mode", "-m", default="DEFAULT", help=cleandoc( """Section of config file to use for target selection configuration. See description of exec for available options.""" ), ) return parser.parse_args(args) def find_projectroot(search_start=Path(".")): root = search_start.resolve() for root in chain((root,), root.parents): if any((root / marker).exists() for marker in (".git", "tox.ini")): return root return None def find_targets_unordered(rootpath): for subdir in rootpath.iterdir(): if not subdir.is_dir(): continue if subdir.name.startswith(".") or subdir.name.startswith("venv"): continue if any( (subdir / marker).exists() for marker in ("setup.py", "pyproject.toml") ): yield subdir else: yield from find_targets_unordered(subdir) def getlistcfg(strval): return [ val.strip() for line in strval.split("\n") for val in line.split(",") if val.strip() ] def find_targets(mode, rootpath): if not rootpath: sys.exit("Could not find a root directory.") cfg = ConfigParser() cfg.read(str(rootpath / "eachdist.ini")) mcfg = cfg[mode] targets = list(find_targets_unordered(rootpath)) if "extraroots" in mcfg: targets += [ path for extraglob in getlistcfg(mcfg["extraroots"]) for path in rootpath.glob(extraglob) ] if "sortfirst" in mcfg: sortfirst = getlistcfg(mcfg["sortfirst"]) def keyfunc(path): path = path.relative_to(rootpath) for idx, pattern in enumerate(sortfirst): if path.match(pattern): return idx return float("inf") targets.sort(key=keyfunc) if "ignore" in mcfg: ignore = getlistcfg(mcfg["ignore"]) def filter_func(path): path = path.relative_to(rootpath) for pattern in ignore: if path.match(pattern): return False return True filtered = filter(filter_func, targets) targets = list(filtered) subglobs = getlistcfg(mcfg.get("subglob", "")) if subglobs: targets = [ newentry for newentry in ( target / subdir for target in targets for subglob in subglobs # We need to special-case the dot, because glob fails to parse that with an IndexError. 
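# (illustration: a hypothetical subglob entry of "src/*" replaces each
# target with the matching paths beneath it, while "." keeps the target itself)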
for subdir in ( (target,) if subglob == "." else target.glob(subglob) ) ) if ".egg-info" not in str(newentry) and newentry.exists() ] return list(unique(targets)) def runsubprocess(dry_run, params, *args, **kwargs): cmdstr = join_args(params) if dry_run: print(cmdstr) return None # Py < 3.6 compat. cwd = kwargs.get("cwd") if cwd and isinstance(cwd, PurePath): kwargs["cwd"] = str(cwd) check = kwargs.pop("check") # Enforce specifying check print(">>>", cmdstr, file=sys.stderr, flush=True) # This is a workaround for subprocess.run(['python']) leaving the virtualenv on Win32. # The cause for this is that when running the python.exe in a virtualenv, # the wrapper executable launches the global python as a subprocess and the search sequence # for CreateProcessW which subprocess.run and Popen use is as follows # (https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessw): # > 1. The directory from which the application loaded. # This will be the directory of the global python.exe, not the venv directory, due to the subprocess mechanism. # > 6. The directories that are listed in the PATH environment variable. # Only this would find the "correct" python.exe. params = list(params) executable = shutil.which(params[0]) if executable: params[0] = executable try: return subprocess_run(params, *args, check=check, **kwargs) except OSError as exc: raise ValueError( "Failed executing " + repr(params) + ": " + str(exc) ) from exc def execute_args(args): if args.allsep and not args.all: args.parser.error("--allsep specified but not --all.") if args.all and not args.allsep: args.allsep = DEFAULT_ALLSEP rootpath = find_projectroot() targets = find_targets(args.mode, rootpath) if not targets: sys.exit(f"Error: No targets selected (root: {rootpath})") def fmt_for_path(fmt, path): return fmt.format( path.as_posix(), rel=path.relative_to(rootpath).as_posix(), raw=path, rawrel=path.relative_to(rootpath), ) def _runcmd(cmd): result = runsubprocess( args.dry_run, shlex.split(cmd), cwd=rootpath, check=False ) if result is not None and result.returncode not in args.allowexitcode: print( f"'{cmd}' failed with code {result.returncode}", file=sys.stderr, ) sys.exit(result.returncode) if args.all: allstr = args.allsep.join( fmt_for_path(args.all, path) for path in targets ) cmd = args.format.format(allstr) _runcmd(cmd) else: for target in targets: cmd = fmt_for_path(args.format, target) _runcmd(cmd) def clean_remainder_args(remainder_args): if remainder_args and remainder_args[0] == "--": del remainder_args[0] def join_args(arglist): return " ".join(map(shlex.quote, arglist)) def install_args(args): clean_remainder_args(args.pipargs) if args.eager_upgrades: args.pipargs += ["--upgrade-strategy=eager"] if args.with_dev_deps: runsubprocess( args.dry_run, [ "python", "-m", "pip", "install", "--upgrade", "pip", "setuptools", "wheel", ] + args.pipargs, check=True, ) allfmt = "-e 'file://{}'" if args.editable else "'file://{}'" execute_args( parse_subargs( args, ( "exec", "python -m pip install {} " + join_args(args.pipargs), "--all", allfmt, ), ) ) if args.with_dev_deps: rootpath = find_projectroot() runsubprocess( args.dry_run, [ "python", "-m", "pip", "install", "--upgrade", "-r", str(rootpath / "dev-requirements.txt"), ] + args.pipargs, check=True, ) def parse_subargs(parentargs, args): subargs = parse_args(args) subargs.dry_run = parentargs.dry_run or subargs.dry_run return subargs def lint_args(args): rootdir = str(find_projectroot()) runsubprocess( args.dry_run, ("black",
"--config", "pyproject.toml", ".") + (("--diff", "--check") if args.check_only else ()), cwd=rootdir, check=True, ) runsubprocess( args.dry_run, ("isort", "--settings-path", ".isort.cfg", ".") + (("--diff", "--check-only") if args.check_only else ()), cwd=rootdir, check=True, ) runsubprocess( args.dry_run, ("flake8", "--config", ".flake8", rootdir), check=True ) execute_args( parse_subargs( args, ("exec", "pylint {}", "--all", "--mode", "lintroots") ) ) execute_args( parse_subargs( args, ( "exec", "python scripts/check_for_valid_readme.py {}", "--all", ), ) ) def find(name, path): for root, _, files in os.walk(path): if name in files: return os.path.join(root, name) return None def filter_packages(targets, packages): filtered_packages = [] for target in targets: for pkg in packages: if pkg in str(target): filtered_packages.append(target) break return filtered_packages def update_version_files(targets, version, packages): print("updating version/__init__.py files") search = "__version__ .*" replace = f'__version__ = "{version}"' for target in filter_packages(targets, packages): version_file_path = target.joinpath( load(target.joinpath("pyproject.toml"))["tool"]["hatch"][ "version" ]["path"] ) with open(version_file_path) as file: text = file.read() if replace in text: print(f"{version_file_path} already contains {replace}") continue with open(version_file_path, "w", encoding="utf-8") as file: file.write(re.sub(search, replace, text)) def update_dependencies(targets, version, packages): print("updating dependencies") # PEP 508 allowed specifier operators operators = ["==", "!=", "<=", ">=", "<", ">", "===", "~=", "="] operators_pattern = "|".join(re.escape(op) for op in operators) for pkg in packages: search = rf"({basename(pkg)}[^,]*)({operators_pattern})(.*\.dev)" replace = r"\1\2 " + version update_files( targets, "pyproject.toml", search, replace, ) def update_patch_dependencies(targets, version, prev_version, packages): print("updating patch dependencies") # PEP 508 allowed specifier operators operators = ["==", "!=", "<=", ">=", "<", ">", "===", "~=", "="] operators_pattern = "|".join(re.escape(op) for op in operators) for pkg in packages: search = rf"({basename(pkg)}[^,]*?)(\s?({operators_pattern})\s?)(.*{prev_version})" replace = r"\g<1>\g<2>" + version print(f"{search=}\t{replace=}\t{pkg=}") update_files( targets, "pyproject.toml", search, replace, ) def update_files(targets, filename, search, replace): errors = False for target in targets: curr_file = find(filename, target) if curr_file is None: print(f"file missing: {target}/{filename}") continue with open(curr_file, encoding="utf-8") as _file: text = _file.read() if replace in text: print(f"{curr_file} already contains {replace}") continue with open(curr_file, "w", encoding="utf-8") as _file: _file.write(re.sub(search, replace, text)) if errors: sys.exit(1) def release_args(args): print("preparing release") rootpath = find_projectroot() targets = list(find_targets_unordered(rootpath)) cfg = ConfigParser() cfg.read(str(find_projectroot() / "eachdist.ini")) versions = args.versions updated_versions = [] for group in versions.split(","): mcfg = cfg[group] version = mcfg["version"] updated_versions.append(version) packages = mcfg["packages"].split() print(f"update {group} packages to {version}") update_dependencies(targets, version, packages) update_version_files(targets, version, packages) def patch_release_args(args): print("preparing patch release") rootpath = find_projectroot() targets = list(find_targets_unordered(rootpath)) cfg 
= ConfigParser() cfg.read(str(find_projectroot() / "eachdist.ini")) # stable mcfg = cfg["stable"] packages = mcfg["packages"].split() print(f"update stable packages to {args.stable_version}") update_patch_dependencies( targets, args.stable_version, args.stable_version_prev, packages ) update_version_files(targets, args.stable_version, packages) # prerelease mcfg = cfg["prerelease"] packages = mcfg["packages"].split() print(f"update prerelease packages to {args.unstable_version}") update_patch_dependencies( targets, args.unstable_version, args.unstable_version_prev, packages ) update_version_files(targets, args.unstable_version, packages) def test_args(args): clean_remainder_args(args.pytestargs) execute_args( parse_subargs( args, ( "exec", "pytest {} " + join_args(args.pytestargs), "--mode", "testroots", ), ) ) def format_args(args): root_dir = format_dir = str(find_projectroot()) if args.path: format_dir = os.path.join(format_dir, args.path) runsubprocess( args.dry_run, ("black", "--config", f"{root_dir}/pyproject.toml", "."), cwd=format_dir, check=True, ) runsubprocess( args.dry_run, ( "isort", "--settings-path", f"{root_dir}/.isort.cfg", "--profile", "black", ".", ), cwd=format_dir, check=True, ) def version_args(args): cfg = ConfigParser() cfg.read(str(find_projectroot() / "eachdist.ini")) print(cfg[args.mode]["version"]) def main(): args = parse_args() args.func(args) if __name__ == "__main__": main() python-opentelemetry-1.39.1/scripts/generate_website_docs.sh000077500000000000000000000003371511654350100243760ustar00rootroot00000000000000#!/bin/bash set -e # this script generates the documentation required for # opentelemetry.io pip install -r docs-requirements.txt TMP_DIR=/tmp/python_otel_docs rm -Rf ${TMP_DIR} sphinx-build -M jekyll ./docs ${TMP_DIR} python-opentelemetry-1.39.1/scripts/griffe_check.py000066400000000000000000000036561511654350100224730ustar00rootroot00000000000000import argparse import sys import griffe from eachdist import find_projectroot, find_targets def get_modules() -> list[str]: rootpath = find_projectroot() targets = find_targets("DEFAULT", rootpath) dirs_to_exclude = [ "docs", "scripts", "opentelemetry-docker-tests", "examples", "_template", ] packages = [] for target in targets: rel_path = target.relative_to(rootpath) if not any(excluded in str(rel_path) for excluded in dirs_to_exclude): packages.append(str(rel_path / "src")) return packages def main(): parser = argparse.ArgumentParser( description="Check for breaking changes using griffe", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) parser.add_argument( "--module", default="opentelemetry", help="Name of the module to check for breaking changes (e.g., opentelemetry, opentelemetry.sdk, opentelemetry.sdk.resources)", ) parser.add_argument( "--against", default="main", help="Git ref to compare against (e.g., branch, tag, or commit)", ) args = parser.parse_args() modules = get_modules() base = griffe.load(args.module, search_paths=modules) against = griffe.load_git( args.module, ref=args.against, search_paths=modules ) breakages = list(griffe.find_breaking_changes(against, base)) # exclude version bumps from breakages as they are expected breakages = [b for b in breakages if b._format_title() != "__version__"] if breakages: for b in breakages: # We can use `b.explain()` to get a detailed explanation of the breaking change # and we can iterate over breakages to perform more complex logic # like skipping per object.path or breakage type print(b.explain()) return 1 return 0 if __name__ == 
"__main__": sys.exit(main()) python-opentelemetry-1.39.1/scripts/proto_codegen.sh000077500000000000000000000045101511654350100226760ustar00rootroot00000000000000#!/bin/bash # # Regenerate python code from OTLP protos in # https://github.com/open-telemetry/opentelemetry-proto # # To use, update PROTO_REPO_BRANCH_OR_COMMIT variable below to a commit hash or # tag in opentelemtry-proto repo that you want to build off of. Then, just run # this script to update the proto files. Commit the changes as well as any # fixes needed in the OTLP exporter. # # Optional envars: # PROTO_REPO_DIR - the path to an existing checkout of the opentelemetry-proto repo # Pinned commit/branch/tag for the current version used in opentelemetry-proto python package. PROTO_REPO_BRANCH_OR_COMMIT="v1.7.0" set -e PROTO_REPO_DIR=${PROTO_REPO_DIR:-"/tmp/opentelemetry-proto"} # root of opentelemetry-python repo repo_root="$(git rev-parse --show-toplevel)" venv_dir="/tmp/proto_codegen_venv" # run on exit even if crash cleanup() { echo "Deleting $venv_dir" rm -rf $venv_dir } trap cleanup EXIT echo "Creating temporary virtualenv at $venv_dir using $(python3 --version)" python3 -m venv $venv_dir source $venv_dir/bin/activate python -m pip install \ -c $repo_root/gen-requirements.txt \ grpcio-tools mypy-protobuf echo 'python -m grpc_tools.protoc --version' python -m grpc_tools.protoc --version # Clone the proto repo if it doesn't exist if [ ! -d "$PROTO_REPO_DIR" ]; then git clone https://github.com/open-telemetry/opentelemetry-proto.git $PROTO_REPO_DIR fi # Pull in changes and switch to requested branch ( cd $PROTO_REPO_DIR git fetch --all git checkout $PROTO_REPO_BRANCH_OR_COMMIT # pull if PROTO_REPO_BRANCH_OR_COMMIT is not a detached head git symbolic-ref -q HEAD && git pull --ff-only || true ) cd $repo_root/opentelemetry-proto/src # clean up old generated code find opentelemetry/ -regex ".*_pb2.*\.pyi?" -exec rm {} + # generate proto code for all protos all_protos=$(find $PROTO_REPO_DIR/ -iname "*.proto") python -m grpc_tools.protoc \ -I $PROTO_REPO_DIR \ --python_out=. \ --mypy_out=. \ $all_protos # generate grpc output only for protos with service definitions service_protos=$(grep -REl "service \w+ {" $PROTO_REPO_DIR/opentelemetry/) python -m grpc_tools.protoc \ -I $PROTO_REPO_DIR \ --python_out=. \ --mypy_out=. \ --grpc_python_out=. \ $service_protos echo "Please update ./opentelemetry-proto/README.rst to include the updated version." python-opentelemetry-1.39.1/scripts/public_symbols_checker.py000066400000000000000000000123131511654350100245740ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from collections import defaultdict from difflib import unified_diff from pathlib import Path from re import match from sys import exit from git import Repo from git.db import GitDB repo = Repo(__file__, odbt=GitDB, search_parent_directories=True) added_symbols = defaultdict(list) removed_symbols = defaultdict(list) def get_symbols(change_type, diff_lines_getter, prefix): if change_type == "D" or prefix == r"\-": file_path_symbols = removed_symbols else: file_path_symbols = added_symbols for diff_lines in ( repo.commit("main") .diff(repo.head.commit) .iter_change_type(change_type) ): if diff_lines.b_blob is None: # This happens if a file has been removed completely. b_file_path = diff_lines.a_blob.path else: b_file_path = diff_lines.b_blob.path b_file_path_obj = Path(b_file_path) if ( b_file_path_obj.suffix != ".py" or "opentelemetry" not in b_file_path or any( # single leading underscore part[0] == "_" and part[1] != "_" # tests directories or part == "tests" # benchmarks directories or part == "benchmarks" for part in b_file_path_obj.parts ) ): continue for diff_line in diff_lines_getter(diff_lines): matching_line = match( r"{prefix}({symbol_re})\s=\s.+|" r"{prefix}def\s({symbol_re})|" r"{prefix}class\s({symbol_re})".format( symbol_re=r"[a-zA-Z][_\w]+", prefix=prefix ), diff_line, ) if matching_line is not None: file_path_symbols[b_file_path].append( next(filter(bool, matching_line.groups())) ) def a_diff_lines_getter(diff_lines): return diff_lines.b_blob.data_stream.read().decode("utf-8").split("\n") def d_diff_lines_getter(diff_lines): return diff_lines.a_blob.data_stream.read().decode("utf-8").split("\n") def m_diff_lines_getter(diff_lines): return unified_diff( diff_lines.a_blob.data_stream.read().decode("utf-8").split("\n"), diff_lines.b_blob.data_stream.read().decode("utf-8").split("\n"), ) get_symbols("A", a_diff_lines_getter, r"") get_symbols("D", d_diff_lines_getter, r"") get_symbols("M", m_diff_lines_getter, r"\+") get_symbols("M", m_diff_lines_getter, r"\-") def remove_common_symbols(): # For each file, we remove the symbols that are added and removed in the # same commit. common_symbols = defaultdict(list) for file_path, symbols in added_symbols.items(): for symbol in symbols: if symbol in removed_symbols[file_path]: common_symbols[file_path].append(symbol) for file_path, symbols in common_symbols.items(): for symbol in symbols: added_symbols[file_path].remove(symbol) removed_symbols[file_path].remove(symbol) # If a file has no added or removed symbols, we remove it from the # dictionaries. for file_path in list(added_symbols.keys()): if not added_symbols[file_path]: del added_symbols[file_path] for file_path in list(removed_symbols.keys()): if not removed_symbols[file_path]: del removed_symbols[file_path] # If a symbol is added and removed in the same commit, we consider it as not # added or removed. remove_common_symbols() if added_symbols or removed_symbols: print("The code in this branch adds the following public symbols:") print() for file_path_, symbols_ in added_symbols.items(): print(f"- {file_path_}") for symbol_ in symbols_: print(f"\t{symbol_}") print() print( "Please make sure that all of them are strictly necessary, if not, " "please consider prefixing them with an underscore to make them " 'private. After that, please label this PR with "Approve Public API ' 'check".' 
) print() print("The code in this branch removes the following public symbols:") print() for file_path_, symbols_ in removed_symbols.items(): print(f"- {file_path_}") for symbol_ in symbols_: print(f"\t{symbol_}") print() print( "Please make sure no public symbols are removed, if so, please " "consider deprecating them instead. After that, please label this " 'PR with "Approve Public API check".' ) exit(1) else: print("The code in this branch will not add any public symbols") python-opentelemetry-1.39.1/scripts/semconv/000077500000000000000000000000001511654350100211625ustar00rootroot00000000000000python-opentelemetry-1.39.1/scripts/semconv/.gitignore000066400000000000000000000000331511654350100231460ustar00rootroot00000000000000opentelemetry-specificationpython-opentelemetry-1.39.1/scripts/semconv/generate.sh000077500000000000000000000036441511654350100233220ustar00rootroot00000000000000#!/bin/bash set -ex SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" ROOT_DIR="${SCRIPT_DIR}/../.." # freeze the spec version to make SemanticAttributes generation reproducible SEMCONV_VERSION=1.38.0 SEMCONV_VERSION_TAG=v$SEMCONV_VERSION OTEL_WEAVER_IMG_VERSION=v0.18.0 INCUBATING_DIR=_incubating cd ${SCRIPT_DIR} rm -rf semantic-conventions || true mkdir semantic-conventions cd semantic-conventions git init git remote add origin https://github.com/open-telemetry/semantic-conventions.git git fetch origin "$SEMCONV_VERSION_TAG" git reset --hard FETCH_HEAD cd ${SCRIPT_DIR} # Check new schema version was added to schemas.py manually SCHEMAS_PY_PATH=${ROOT_DIR}/opentelemetry-semantic-conventions/src/opentelemetry/semconv/schemas.py if ! grep -q $SEMCONV_VERSION "$SCHEMAS_PY_PATH"; then echo "Error: schema version $SEMCONV_VERSION is not found in $SCHEMAS_PY_PATH. Please add it manually." exit 1 fi generate() { TARGET=$1 OUTPUT=$2 FILTER=$3 docker run --rm \ -v ${SCRIPT_DIR}/semantic-conventions/model:/source \ -v ${SCRIPT_DIR}/templates:/templates \ -v ${ROOT_DIR}/opentelemetry-semantic-conventions/src/opentelemetry/semconv/:/output \ otel/weaver:$OTEL_WEAVER_IMG_VERSION \ registry \ generate \ --registry=/source \ --templates=/templates \ ${TARGET} \ /output/${TARGET} \ --param output=${OUTPUT} \ --param filter=${FILTER} } # stable attributes and metrics mkdir -p ${ROOT_DIR}/opentelemetry-semantic-conventions/src/opentelemetry/semconv/attributes mkdir -p ${ROOT_DIR}/opentelemetry-semantic-conventions/src/opentelemetry/semconv/metrics generate "./" "./" "stable" mkdir -p ${ROOT_DIR}/opentelemetry-semantic-conventions/src/opentelemetry/semconv/${INCUBATING_DIR}/attributes mkdir -p ${ROOT_DIR}/opentelemetry-semantic-conventions/src/opentelemetry/semconv/${INCUBATING_DIR}/metrics generate "./" "./${INCUBATING_DIR}/" "any" cd "$ROOT_DIR" tox -e ruff python-opentelemetry-1.39.1/scripts/semconv/templates/000077500000000000000000000000001511654350100231605ustar00rootroot00000000000000python-opentelemetry-1.39.1/scripts/semconv/templates/registry/000077500000000000000000000000001511654350100250305ustar00rootroot00000000000000python-opentelemetry-1.39.1/scripts/semconv/templates/registry/common.j2000066400000000000000000000026731511654350100265650ustar00rootroot00000000000000{%- macro file_header() -%} # Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. {% endmacro -%} {%- macro str_or_empty(str) -%} {% if str is none %}{{""}}{% else %}{{str}}{% endif %} {%- endmacro %} {%- macro remove_trailing_dots(str) -%} {%- if str[-1:] == '.' -%}{{ remove_trailing_dots(str[:-1]) }}{%- else -%}{{ str }}{%- endif -%} {%- endmacro -%} {%- macro comment_with_prefix(str, prefix) -%} {{remove_trailing_dots(str | trim(' \n')) | comment_with_prefix(prefix) | replace("\\", "\\\\")}} {%- endmacro %} {%- macro import_deprecated(semconv) -%} {%- if (semconv | select("deprecated") | list | count > 0) or (ctx.filter == "any" and semconv | select("stable") | list | count > 0) -%} from typing_extensions import deprecated {%- endif -%} {%- endmacro-%} {%- macro deprecated_note_or_empty(attribute) -%} {% if attribute is deprecated %}{{ attribute.deprecated.note }}{% else %}{{""}}{% endif %} {%- endmacro %} python-opentelemetry-1.39.1/scripts/semconv/templates/registry/semantic_attributes.j2000066400000000000000000000070271511654350100313440ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Final {% set file_name = ctx.output + (ctx.root_namespace | snake_case) ~ "_attributes.py" -%} {{- template.set_file_name(file_name) -}} {%- import 'common.j2' as c %} {% set attributes = ctx.attributes | list %} {% set enum_attributes = attributes | select("enum") | rejectattr("name", "in", ctx.excluded_attributes) | list %} {% if enum_attributes | count > 0 %}from enum import Enum{% endif %} {{c.import_deprecated(enum_attributes)}} {%- macro attribute_name(attribute) -%} {{ attribute.name | screaming_snake_case }}{%- if attribute.type is template_type -%}_TEMPLATE{%- endif -%} {%- endmacro -%} {%- macro stable_class_ref(const_name, separator) -%} {{ctx.stable_package_name}}.{{ctx.root_namespace}}_attributes{{separator}}{{const_name}} {%- endmacro %} {%- macro write_docstring(name, brief, note, deprecated_note, stability, multiline) -%} {%- if multiline %}""" {% endif %} {%- if deprecated_note is mapping -%} {{prefix}}Deprecated: {{c.comment_with_prefix(deprecated_note.note, "")}}. {%- elif c.str_or_empty(deprecated_note)|length -%} {{prefix}}Deprecated: {{c.comment_with_prefix(deprecated_note, "")}}. {%- elif ctx.filter == "any" and stability == "stable" -%} {{prefix}}Deprecated in favor of stable :py:const:`{{stable_class_ref(name, '.')}}`. {%- elif c.str_or_empty(brief)|length -%} {{prefix}}{{c.comment_with_prefix(brief, "")}}. {%- if c.str_or_empty(note)|length %} {{prefix}}Note: {{c.comment_with_prefix(note, "")}}. 
{%- endif -%} {%- endif -%} {%- if multiline %} """{%- endif %} {%- endmacro -%} {% for attribute in attributes %} {% set attr_name = attribute_name(attribute) %} {%- set multiline = attribute.name not in ctx.excluded_attributes -%} {%- set deprecated_note = c.deprecated_note_or_empty(attribute) %} {%- set doc_string = write_docstring(attr_name, attribute.brief, attribute.note, deprecated_note, attribute.stability, multiline)-%} {%- set prefix = "" if multiline else "# " -%} {{prefix}}{{attr_name}}: Final = "{{attribute.name}}" {{prefix}}{{doc_string}} {% endfor %} {% for attribute in enum_attributes %}{%- set class_name = attribute.name | map_text("py_enum_attribute_to_class_name", attribute.name | pascal_case ~ "Values") -%} {%- if attribute is deprecated %} @deprecated("The attribute {{attribute.name}} is deprecated - {{ c.comment_with_prefix(attribute.deprecated.note, "") }}") {%- elif attribute.stability == "stable" and ctx.filter == "any" %} @deprecated("Deprecated in favor of stable :py:const:`{{stable_class_ref(class_name, '.')}}`.") {%- endif %} class {{class_name}}(Enum): {%- for member in attribute.type.members %} {% set member_name = member.id | screaming_snake_case -%} {%- set doc_string=write_docstring(class_name + '.' + member_name, member.brief or member.id, "", member.deprecated, member.stability, false)-%} {{member_name}} = {{ member.value | print_member_value }} {% if doc_string %}"""{{doc_string}}"""{% endif %} {%- endfor %} {% endfor %} python-opentelemetry-1.39.1/scripts/semconv/templates/registry/semantic_metrics.j2000066400000000000000000000073051511654350100306230ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. {% set file_name = ctx.output + ctx.root_namespace | snake_case ~ "_metrics.py" -%} {{- template.set_file_name(file_name) -}} {% import 'common.j2' as c -%} {%- macro stable_class_ref(const_name, separator) -%} {{ctx.stable_package_name}}.{{ctx.root_namespace}}_metrics{{separator}}{{const_name}} {%- endmacro %} {%- macro write_docstring(metric, const_name, prefix) -%} {%- if metric is deprecated %} {{prefix}}Deprecated: {{c.comment_with_prefix(metric.deprecated.note, prefix)}}. {%- elif ctx.filter == "any" and metric.stability == "stable" %} {{prefix}}Deprecated in favor of stable :py:const:`{{stable_class_ref(const_name, '.')}}`. {%- else -%} {%- if c.str_or_empty(metric.brief)|length %} {{prefix}}{{c.comment_with_prefix(metric.brief, prefix)}} {%- endif %} {{prefix}}Instrument: {{ metric.instrument }} {{prefix}}Unit: {{ metric.unit }} {%- if c.str_or_empty(metric.note)|length %} {{prefix}}Note: {{c.comment_with_prefix(metric.note, prefix)}}. 
{%- endif -%} {%- endif -%} {%- endmacro -%} {%- macro import_instrument_classes(metrics) -%} {% if ctx.filter == "any" %} from opentelemetry.metrics import Meter {%- set instruments = ["counter", "histogram", "updowncounter"]-%} {%- for i in instruments -%} {%- if ctx.metrics | selectattr("instrument", "equalto", i) | list | count > 0 %} from opentelemetry.metrics import {{i | map_text("py_instrument_to_type")}} {%- endif -%} {%- endfor-%} {%- if ctx.metrics | selectattr("instrument", "equalto", "gauge") | list | count > 0 %} from typing import Callable, Generator, Iterable, Optional, Sequence, Union from opentelemetry.metrics import CallbackOptions, ObservableGauge, Observation # pylint: disable=invalid-name CallbackT = Union[ Callable[[CallbackOptions], Iterable[Observation]], Generator[Iterable[Observation], CallbackOptions, None], ] {%- endif %} {%- endif -%} {%- endmacro %} from typing import Final {{ import_instrument_classes(filtered_metrics) }} {%- for metric in ctx.metrics %} {% set const_name = metric.metric_name | screaming_snake_case %} {{const_name}}: Final = "{{metric.metric_name}}" {%- set doc_string=write_docstring(metric, const_name, "")-%}{%- if doc_string %} """{{doc_string}} """{% endif %} {% if ctx.filter == "any" %} {% set metric_name = metric.metric_name | replace(".", "_") %} {%- if metric.instrument == "gauge" %} def create_{{ metric_name }}(meter: Meter, callbacks: Optional[Sequence[CallbackT]]) -> {{metric.instrument | map_text("py_instrument_to_type")}}: {%- else %} def create_{{ metric_name }}(meter: Meter) -> {{metric.instrument | map_text("py_instrument_to_type")}}: {%- endif %} {%- if c.str_or_empty(metric.brief) |length %} """{{ c.comment_with_prefix(metric.brief, "") }}""" {% endif -%} return meter.create_{{ metric.instrument | map_text("py_instrument_to_factory")}}( name={{ const_name }}, {%- if metric.instrument == "gauge" %} callbacks=callbacks, {%- endif %} description="{{ c.str_or_empty(metric.brief|trim)|replace('\n', ' ')}}", unit="{{ metric.unit }}", ) {%- endif -%} {% endfor %} python-opentelemetry-1.39.1/scripts/semconv/templates/registry/weaver.yaml000066400000000000000000000034601511654350100272100ustar00rootroot00000000000000params: # excluded namespaces will not be generated excluded_namespaces: [ios, aspnetcore, signalr, android, dotnet, jvm, kestrel, v8js, veightjs, go, nodejs] # excluded attributes will be commented out in the generated code # this behavior is fully controlled by jinja templates excluded_attributes: ["messaging.client_id"] stable_package_name: opentelemetry.semconv templates: - pattern: semantic_attributes.j2 filter: > semconv_grouped_attributes({ "exclude_root_namespace": $excluded_namespaces, "exclude_stability": if $filter == "any" then [] else ["experimental", "", null] end, }) | map({ root_namespace: .root_namespace, attributes: .attributes, output: $output + "attributes/", stable_package_name: $stable_package_name + ".attributes", filter: $filter, excluded_attributes: $excluded_attributes[] }) application_mode: each - pattern: semantic_metrics.j2 filter: > semconv_grouped_metrics({ "exclude_root_namespace": $excluded_namespaces, "exclude_stability": if $filter == "any" then [] else ["experimental", "", null] end, }) | map({ root_namespace: .root_namespace, metrics: .metrics, output: $output + "metrics/", stable_package_name: $stable_package_name + ".metrics", filter: $filter }) application_mode: each text_maps: py_instrument_to_factory: counter: counter histogram: histogram updowncounter: up_down_counter gauge: 
observable_gauge py_instrument_to_type: counter: Counter histogram: Histogram updowncounter: UpDownCounter gauge: ObservableGauge # remember the Values suffix! py_enum_attribute_to_class_name: cpython.gc.generation: CPythonGCGenerationValues python-opentelemetry-1.39.1/scripts/tracecontext-integration-test.sh000077500000000000000000000025151511654350100260530ustar00rootroot00000000000000#!/bin/sh set -e # hard-coding the git tag to ensure stable builds. TRACECONTEXT_GIT_TAG="d782773b2cf2fa4afd6a80a93b289d8a74ca894d" # clone w3c tracecontext tests mkdir -p target rm -rf ./target/trace-context git clone https://github.com/w3c/trace-context ./target/trace-context cd ./target/trace-context && git checkout $TRACECONTEXT_GIT_TAG && cd - # start example opentelemetry service, which propagates trace-context by # default. python ./tests/w3c_tracecontext_validation_server.py 1>&2 & EXAMPLE_SERVER_PID=$! # give the app server a little time to start up. Not adding some sort # of delay would cause many of the tracecontext tests to fail because they are # unable to connect. sleep 1 onshutdown() { # send a SIGINT to ensure # it is caught as a KeyboardInterrupt in the # example service. kill -INT $EXAMPLE_SERVER_PID } trap onshutdown EXIT cd ./target/trace-context/test # The disabled test is not compatible with an optional part of the W3C # spec that we have implemented (dropping duplicated keys from tracestate). # W3C are planning to include flags for optional features in the test suite. # https://github.com/w3c/trace-context/issues/529 # FIXME: update test to use flags for optional features when available. export SERVICE_ENDPOINT=http://127.0.0.1:5000/verify-tracecontext pytest test.py -k "not test_tracestate_duplicated_keys"python-opentelemetry-1.39.1/scripts/update_sha.py000066400000000000000000000034551511654350100222060ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
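# A minimal sketch of the round-trip YAML editing this script relies on:
# ruamel.yaml in its default round-trip mode preserves quoting and comments
# that a plain load/dump cycle would discard. The inline document and the
# _demo_* names below are illustrative; the key mirrors the one updated by
# update_sha().
import io as _demo_io

from ruamel.yaml import YAML as _DemoYAML

_demo_yaml = _DemoYAML()
_demo_yaml.preserve_quotes = True
_demo_doc = _demo_yaml.load("env:\n  CONTRIB_REPO_SHA: 'abc123'  # pinned\n")
_demo_doc["env"]["CONTRIB_REPO_SHA"] = "def456"
_demo_buf = _demo_io.StringIO()
_demo_yaml.dump(_demo_doc, _demo_buf)
# _demo_buf.getvalue() keeps the single quotes and the trailing comment intact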
# pylint: disable=import-error,unspecified-encoding import argparse import requests from ruamel.yaml import YAML API_URL = "https://api.github.com/repos/open-telemetry/opentelemetry-python-contrib/commits/" workflow_files = [ ".github/workflows/test_0.yml", ".github/workflows/test_1.yml", ".github/workflows/misc_0.yml", ".github/workflows/contrib_0.yml", ".github/workflows/lint_0.yml", ] def get_sha(branch): url = API_URL + branch response = requests.get(url, timeout=15) response.raise_for_status() return response.json()["sha"] def update_sha(sha): yaml = YAML() yaml.preserve_quotes = True for workflow_file in workflow_files: with open(workflow_file, "r") as file: workflow = yaml.load(file) workflow["env"]["CONTRIB_REPO_SHA"] = sha with open(workflow_file, "w") as file: yaml.dump(workflow, file) def main(): args = parse_args() sha = get_sha(args.branch) update_sha(sha) def parse_args(): parser = argparse.ArgumentParser( description="Updates the SHA in the workflow file" ) parser.add_argument("-b", "--branch", help="branch to use") return parser.parse_args() if __name__ == "__main__": main() python-opentelemetry-1.39.1/shim/000077500000000000000000000000001511654350100167615ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/000077500000000000000000000000001511654350100247735ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/LICENSE000066400000000000000000000261351511654350100260070ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship.
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/README.rst000066400000000000000000000010051511654350100264560ustar00rootroot00000000000000OpenCensus Shim for OpenTelemetry ================================== |pypi| .. 
|pypi| image:: https://badge.fury.io/py/opentelemetry-opencensus-shim.svg :target: https://pypi.org/project/opentelemetry-opencensus-shim/ Installation ------------ :: pip install opentelemetry-opencensus-shim References ---------- * `OpenCensus Shim for OpenTelemetry `_ * `OpenTelemetry Project `_ python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/pyproject.toml000066400000000000000000000026261511654350100277150ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-opencensus-shim" dynamic = ["version"] description = "OpenCensus Shim for OpenTelemetry" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 4 - Beta", "Framework :: OpenTelemetry", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "opentelemetry-api ~= 1.3", "wrapt ~= 1.0", # may work with older versions but this is the oldest confirmed version "opencensus >= 0.11.0", ] [project.optional-dependencies] [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/shim/opentelemetry-opencensus-shim" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/shim/opencensus/version/__init__.py" [tool.hatch.build.targets.sdist] include = ["/src", "/tests"] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/000077500000000000000000000000001511654350100255625ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/opentelemetry/000077500000000000000000000000001511654350100304565ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/opentelemetry/shim/000077500000000000000000000000001511654350100314165ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/opentelemetry/shim/opencensus/000077500000000000000000000000001511654350100336005ustar00rootroot00000000000000__init__.py000066400000000000000000000030271511654350100356340ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/opentelemetry/shim/opencensus# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The OpenTelemetry OpenCensus shim is a library which allows an easy migration from OpenCensus to OpenTelemetry. Additional details can be found `in the specification `_. 
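A brief usage sketch (the span name here is illustrative, and an
OpenTelemetry SDK TracerProvider is assumed to be configured already)::

    from opencensus.trace.tracer import Tracer

    from opentelemetry.shim.opencensus import install_shim

    install_shim()

    tracer = Tracer()
    with tracer.span(name="do-work"):
        ...  # recorded as an OpenTelemetry span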
The shim consists of a set of classes which implement the OpenCensus Python API while using OpenTelemetry constructs behind the scenes. Its purpose is to allow applications which are already instrumented using OpenCensus to start using OpenTelemetry with minimal effort, without having to rewrite large portions of the codebase. """ from opentelemetry.shim.opencensus._patch import install_shim, uninstall_shim __all__ = [ "install_shim", "uninstall_shim", ] # TODO: Decide when this should be called. # 1. defensive import in opentelemetry-api # 2. defensive import directly in OpenCensus, although that would require a release # 3. ask the user to do it # install_shim() _patch.py000066400000000000000000000050611511654350100353330ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/opentelemetry/shim/opencensus# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import lru_cache from logging import getLogger from typing import Optional from opencensus.trace.span_context import SpanContext from opencensus.trace.tracer import Tracer from opencensus.trace.tracers.noop_tracer import NoopTracer from opentelemetry import trace from opentelemetry.shim.opencensus._shim_tracer import ShimTracer from opentelemetry.shim.opencensus.version import __version__ _logger = getLogger(__name__) def install_shim( tracer_provider: Optional[trace.TracerProvider] = None, ) -> None: otel_tracer = trace.get_tracer( "opentelemetry-opencensus-shim", __version__, tracer_provider=tracer_provider, ) @lru_cache() def cached_shim_tracer(span_context: SpanContext) -> ShimTracer: return ShimTracer( NoopTracer(), oc_span_context=span_context, otel_tracer=otel_tracer, ) def fget_tracer(self: Tracer) -> ShimTracer: # self.span_context is how instrumentations pass propagated context into OpenCensus e.g. # https://github.com/census-instrumentation/opencensus-python/blob/fd064f438c5e490d25b004ee2545be55d2e28679/contrib/opencensus-ext-flask/opencensus/ext/flask/flask_middleware.py#L147-L153 return cached_shim_tracer(self.span_context) def fset_tracer(self, value) -> None: # ignore attempts to set the value pass # Tracer's constructor sets self.tracer to either a NoopTracer or ContextTracer depending # on sampler: # https://github.com/census-instrumentation/opencensus-python/blob/2e08df591b507612b3968be8c2538dedbf8fab37/opencensus/trace/tracer.py#L63. # We monkeypatch Tracer.tracer with a property to return a shim instance instead. This # makes all instances of Tracer (even those already created) use a ShimTracer. 
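    # Net effect (illustrative): after install_shim(), `Tracer().tracer`
    # resolves to a ShimTracer bound to this module's OpenTelemetry tracer,
    # so spans created through the OpenCensus API are recorded by
    # OpenTelemetry.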
Tracer.tracer = property(fget_tracer, fset_tracer) _logger.info("Installed OpenCensus shim") def uninstall_shim() -> None: if hasattr(Tracer, "tracer"): del Tracer.tracer _shim_span.py000066400000000000000000000145011511654350100362140ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/opentelemetry/shim/opencensus# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from datetime import datetime from typing import TYPE_CHECKING import wrapt from opencensus.trace import execution_context from opencensus.trace.blank_span import BlankSpan from opencensus.trace.span import SpanKind from opencensus.trace.status import Status from opencensus.trace.time_event import MessageEvent from opentelemetry import context, trace if TYPE_CHECKING: from opentelemetry.shim.opencensus._shim_tracer import ShimTracer _logger = logging.getLogger(__name__) # Copied from Java # https://github.com/open-telemetry/opentelemetry-java/blob/0d3a04669e51b33ea47b29399a7af00012d25ccb/opencensus-shim/src/main/java/io/opentelemetry/opencensusshim/SpanConverter.java#L24-L27 _MESSAGE_EVENT_ATTRIBUTE_KEY_TYPE = "message.event.type" _MESSAGE_EVENT_ATTRIBUTE_KEY_SIZE_UNCOMPRESSED = ( "message.event.size.uncompressed" ) _MESSAGE_EVENT_ATTRIBUTE_KEY_SIZE_COMPRESSED = "message.event.size.compressed" _MESSAGE_EVENT_TYPE_STR_MAPPING = { 0: "TYPE_UNSPECIFIED", 1: "SENT", 2: "RECEIVED", } def _opencensus_time_to_nanos(timestamp: str) -> int: """Converts an OpenCensus formatted time string (ISO 8601 with Z) to time.time_ns style unix timestamp """ # format taken from # https://github.com/census-instrumentation/opencensus-python/blob/c38c71b9285e71de94d0185ff3c5bf65ee163345/opencensus/common/utils/__init__.py#L76 # # datetime.fromisoformat() does not work with the added "Z" until python 3.11 seconds_float = datetime.strptime( timestamp, "%Y-%m-%dT%H:%M:%S.%fZ" ).timestamp() return round(seconds_float * 1e9) # pylint: disable=abstract-method class ShimSpan(wrapt.ObjectProxy): def __init__( self, wrapped: BlankSpan, *, otel_span: trace.Span, shim_tracer: "ShimTracer", ) -> None: super().__init__(wrapped) self._self_otel_span = otel_span self._self_shim_tracer = shim_tracer self._self_token: object = None # Set a few values for BlankSpan members (they appear to be part of the "public" API # even though they are not documented in BaseSpan). Some instrumentations may use these # and not expect an AttributeError to be raised. Set values from OTel where possible # and let ObjectProxy defer to the wrapped BlankSpan otherwise. 
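        # get_span_context() returns the OTel SpanContext; `is_remote` is True
        # only for a context extracted from a propagator, so its negation maps
        # onto OpenCensus's `same_process_as_parent_span`.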
sc = self._self_otel_span.get_span_context() self.same_process_as_parent_span = not sc.is_remote self.span_id = sc.span_id def span(self, name="child_span"): return self._self_shim_tracer.start_span(name=name) def add_attribute(self, attribute_key, attribute_value): self._self_otel_span.set_attribute(attribute_key, attribute_value) def add_annotation(self, description, **attrs): self._self_otel_span.add_event(description, attrs) def add_message_event(self, message_event: MessageEvent): attrs = { _MESSAGE_EVENT_ATTRIBUTE_KEY_TYPE: _MESSAGE_EVENT_TYPE_STR_MAPPING[ message_event.type ], } if message_event.uncompressed_size_bytes is not None: attrs[_MESSAGE_EVENT_ATTRIBUTE_KEY_SIZE_UNCOMPRESSED] = ( message_event.uncompressed_size_bytes ) if message_event.compressed_size_bytes is not None: attrs[_MESSAGE_EVENT_ATTRIBUTE_KEY_SIZE_COMPRESSED] = ( message_event.compressed_size_bytes ) timestamp = _opencensus_time_to_nanos(message_event.timestamp) self._self_otel_span.add_event( str(message_event.id), attrs, timestamp=timestamp, ) # pylint: disable=no-self-use def add_link(self, link): """span links do not work with the shim because the OpenCensus Tracer does not accept links in start_span(). Same issue applies to SpanKind. Also see: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/compatibility/opencensus.md#known-incompatibilities """ _logger.warning( "OpenTelemetry does not support links added after a span is created." ) @property def span_kind(self): """Setting span_kind does not work with the shim because the OpenCensus Tracer does not accept the param in start_span() and there's no way to set OTel span kind after start_span(). """ return SpanKind.UNSPECIFIED @span_kind.setter def span_kind(self, value): _logger.warning( "OpenTelemetry does not support setting span kind after a span is created." ) def set_status(self, status: Status): self._self_otel_span.set_status( trace.StatusCode.OK if status.is_ok else trace.StatusCode.ERROR, status.description, ) def finish(self): """Note this method does not pop the span from current context. Use Tracer.end_span() or a `with span: ...` statement (contextmanager) to do that. """ self._self_otel_span.end() def __enter__(self): self._self_otel_span.__enter__() return self # pylint: disable=arguments-differ def __exit__(self, exception_type, exception_value, traceback): self._self_otel_span.__exit__( exception_type, exception_value, traceback ) # OpenCensus Span.__exit__() calls Tracer.end_span() # https://github.com/census-instrumentation/opencensus-python/blob/2e08df591b507612b3968be8c2538dedbf8fab37/opencensus/trace/span.py#L390 # but that would cause the OTel span to be ended twice. Instead, this code just copies # the context teardown from that method. context.detach(self._self_token) execution_context.set_current_span( self._self_shim_tracer.current_span() ) _shim_tracer.py000066400000000000000000000131711511654350100365350ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/opentelemetry/shim/opencensus# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import logging import wrapt from opencensus.trace import execution_context from opencensus.trace.blank_span import BlankSpan from opencensus.trace.span_context import SpanContext from opencensus.trace.tracers.base import Tracer as BaseTracer from opencensus.trace.tracestate import Tracestate from opentelemetry import context, trace from opentelemetry.shim.opencensus._shim_span import ShimSpan _logger = logging.getLogger(__name__) _SHIM_SPAN_KEY = context.create_key("opencensus-shim-span-key") _SAMPLED = trace.TraceFlags(trace.TraceFlags.SAMPLED) def set_shim_span_in_context( span: ShimSpan, ctx: context.Context ) -> context.Context: return context.set_value(_SHIM_SPAN_KEY, span, ctx) def get_shim_span_in_context() -> ShimSpan: return context.get_value(_SHIM_SPAN_KEY) def set_oc_span_in_context( oc_span_context: SpanContext, ctx: context.Context ) -> context.Context: """Returns a new OTel context based on ctx with oc_span_context set as the current span""" # If no SpanContext is passed to the opencensus.trace.tracer.Tracer, it creates a new one # with a random trace ID and a None span ID to be the parent: # https://github.com/census-instrumentation/opencensus-python/blob/2e08df591b507612b3968be8c2538dedbf8fab37/opencensus/trace/tracer.py#L47. # # OpenTelemetry considers this an invalid SpanContext and will ignore it, so we can just # return early if oc_span_context.span_id is None: return ctx trace_id = int(oc_span_context.trace_id, 16) span_id = int(oc_span_context.span_id, 16) is_remote = oc_span_context.from_header trace_flags = ( _SAMPLED if oc_span_context.trace_options.get_enabled() else None ) trace_state = ( trace.TraceState(tuple(oc_span_context.tracestate.items())) # OC SpanContext does not validate this type if isinstance(oc_span_context.tracestate, Tracestate) else None ) return trace.set_span_in_context( trace.NonRecordingSpan( trace.SpanContext( trace_id=trace_id, span_id=span_id, is_remote=is_remote, trace_flags=trace_flags, trace_state=trace_state, ) ) ) # pylint: disable=abstract-method class ShimTracer(wrapt.ObjectProxy): def __init__( self, wrapped: BaseTracer, *, oc_span_context: SpanContext, otel_tracer: trace.Tracer, ) -> None: super().__init__(wrapped) self._self_oc_span_context = oc_span_context self._self_otel_tracer = otel_tracer # For now, finish() is not implemented by the shim. It would require keeping a list of all # spans created so they can all be finished. # def finish(self): # """End spans and send to reporter.""" def span(self, name="span"): return self.start_span(name=name) def start_span(self, name="span"): parent_ctx = context.get_current() # If there is no current span in context, use the one provided to the OC Tracer at # creation time if trace.get_current_span(parent_ctx) is trace.INVALID_SPAN: parent_ctx = set_oc_span_in_context( self._self_oc_span_context, parent_ctx ) span = self._self_otel_tracer.start_span(name, context=parent_ctx) shim_span = ShimSpan( BlankSpan(name=name, context_tracer=self), otel_span=span, shim_tracer=self, ) ctx = trace.set_span_in_context(span) ctx = set_shim_span_in_context(shim_span, ctx) # OpenCensus's ContextTracer calls execution_context.set_current_span(span), which is # equivalent to the below. This can leak context if the span is never ended, but it # matches OpenCensus's own behavior.
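# The token returned by context.attach() below is stashed on the shim span so
# that end_span() (or ShimSpan.__exit__) can later pass it to context.detach(),
# restoring whatever context was current before start_span() ran.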
# pylint: disable=protected-access shim_span._self_token = context.attach(ctx) # Also set it in OC's context, equivalent to # https://github.com/census-instrumentation/opencensus-python/blob/2e08df591b507612b3968be8c2538dedbf8fab37/opencensus/trace/tracers/context_tracer.py#L94 execution_context.set_current_span(shim_span) return shim_span def end_span(self): """Finishes the current span in the context and restores the context from before the span was started. """ span = self.current_span() if not span: _logger.warning("No active span, cannot do end_span.") return span.finish() # pylint: disable=protected-access context.detach(span._self_token) # Also reset the OC execution_context, equivalent to # https://github.com/census-instrumentation/opencensus-python/blob/2e08df591b507612b3968be8c2538dedbf8fab37/opencensus/trace/tracers/context_tracer.py#L114-L117 execution_context.set_current_span(self.current_span()) # pylint: disable=no-self-use def current_span(self): return get_shim_span_in_context() def add_attribute_to_current_span(self, attribute_key, attribute_value): self.current_span().add_attribute(attribute_key, attribute_value) py.typed000066400000000000000000000000001511654350100352060ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/opentelemetry/shim/opencensusversion/000077500000000000000000000000001511654350100352065ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/opentelemetry/shim/opencensus__init__.py000066400000000000000000000011401511654350100373130ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/src/opentelemetry/shim/opencensus/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
__version__ = "0.60b1" python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/test-requirements.txt000066400000000000000000000012461511654350100312370ustar00rootroot00000000000000asgiref==3.7.2 cachetools==5.3.3 certifi==2024.7.4 charset-normalizer==3.3.2 google-api-core==2.17.1 google-auth==2.28.1 googleapis-common-protos==1.63.2 grpcio==1.66.2 idna==3.7 importlib-metadata==6.11.0 iniconfig==2.0.0 opencensus==0.11.1 opencensus-context==0.1.3 opencensus-proto==0.1.0 packaging==24.0 pluggy==1.5.0 protobuf==3.20.3 py-cpuinfo==9.0.0 pyasn1==0.5.1 pyasn1-modules==0.3.0 pytest==7.4.4 requests==2.32.3 rsa==4.9 six==1.16.0 tomli==2.0.1 typing_extensions==4.10.0 urllib3==2.2.2 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e opentelemetry-sdk -e tests/opentelemetry-test-utils -e opentelemetry-semantic-conventions -e shim/opentelemetry-opencensus-shim python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/tests/000077500000000000000000000000001511654350100261355ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/tests/__init__.py000066400000000000000000000000001511654350100302340ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/tests/test_patch.py000066400000000000000000000056711511654350100306560ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from opencensus.trace.tracer import Tracer from opencensus.trace.tracers.noop_tracer import NoopTracer from opentelemetry.shim.opencensus import install_shim, uninstall_shim from opentelemetry.shim.opencensus._shim_tracer import ShimTracer class TestPatch(unittest.TestCase): def setUp(self): uninstall_shim() def tearDown(self): uninstall_shim() def test_install_shim(self): # Initially the shim is not installed. The Tracer class has no tracer property, it is # instance level only. self.assertFalse(hasattr(Tracer, "tracer")) install_shim() # The actual Tracer class should now be patched with a tracer property self.assertTrue(hasattr(Tracer, "tracer")) self.assertIsInstance(Tracer.tracer, property) def test_install_shim_affects_existing_tracers(self): # Initially the shim is not installed. 
An OC Tracer instance should have a NoopTracer oc_tracer = Tracer() self.assertIsInstance(oc_tracer.tracer, NoopTracer) self.assertNotIsInstance(oc_tracer.tracer, ShimTracer) install_shim() # The property should cause existing instances to get the singleton ShimTracer self.assertIsInstance(oc_tracer.tracer, ShimTracer) def test_install_shim_affects_new_tracers(self): install_shim() # The property should cause new instances to get the singleton ShimTracer oc_tracer = Tracer() self.assertIsInstance(oc_tracer.tracer, ShimTracer) def test_uninstall_shim_resets_tracer(self): install_shim() uninstall_shim() # The actual Tracer class should not be patched self.assertFalse(hasattr(Tracer, "tracer")) def test_uninstall_shim_resets_existing_tracers(self): oc_tracer = Tracer() orig = oc_tracer.tracer install_shim() uninstall_shim() # Accessing the tracer member should no longer use the property, and instead should get # its original NoopTracer self.assertIs(oc_tracer.tracer, orig) def test_uninstall_shim_resets_new_tracers(self): install_shim() uninstall_shim() # Accessing the tracer member should get the NoopTracer oc_tracer = Tracer() self.assertIsInstance(oc_tracer.tracer, NoopTracer) self.assertNotIsInstance(oc_tracer.tracer, ShimTracer) python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/tests/test_shim.py000066400000000000000000000200751511654350100305120ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import unittest from unittest.mock import patch from opencensus.trace import trace_options, tracestate from opencensus.trace.blank_span import BlankSpan as OcBlankSpan from opencensus.trace.link import Link as OcLink from opencensus.trace.span import SpanKind from opencensus.trace.span_context import SpanContext from opencensus.trace.tracer import Tracer as OcTracer from opencensus.trace.tracers.noop_tracer import NoopTracer as OcNoopTracer from opentelemetry import context, trace from opentelemetry.shim.opencensus import install_shim, uninstall_shim from opentelemetry.shim.opencensus._shim_span import ShimSpan from opentelemetry.shim.opencensus._shim_tracer import ( ShimTracer, set_oc_span_in_context, ) class TestShim(unittest.TestCase): def setUp(self): uninstall_shim() install_shim() def tearDown(self): uninstall_shim() def assert_hasattr(self, obj, key): self.assertTrue(hasattr(obj, key)) def test_shim_tracer_wraps_noop_tracer(self): oc_tracer = OcTracer() self.assertIsInstance(oc_tracer.tracer, ShimTracer) # wrapt.ObjectProxy does the magic here. The ShimTracer should look like the real OC # NoopTracer.
self.assertIsInstance(oc_tracer.tracer, OcNoopTracer) self.assert_hasattr(oc_tracer.tracer, "finish") self.assert_hasattr(oc_tracer.tracer, "span") self.assert_hasattr(oc_tracer.tracer, "start_span") self.assert_hasattr(oc_tracer.tracer, "end_span") self.assert_hasattr(oc_tracer.tracer, "current_span") self.assert_hasattr(oc_tracer.tracer, "add_attribute_to_current_span") self.assert_hasattr(oc_tracer.tracer, "list_collected_spans") def test_shim_tracer_starts_shim_spans(self): oc_tracer = OcTracer() with oc_tracer.start_span("foo") as span: self.assertIsInstance(span, ShimSpan) def test_shim_span_wraps_blank_span(self): oc_tracer = OcTracer() with oc_tracer.start_span("foo") as span: # wrapt.ObjectProxy does the magic here. The ShimSpan should look like the real OC # BlankSpan. self.assertIsInstance(span, OcBlankSpan) # members self.assert_hasattr(span, "name") self.assert_hasattr(span, "parent_span") self.assert_hasattr(span, "start_time") self.assert_hasattr(span, "end_time") self.assert_hasattr(span, "span_id") self.assert_hasattr(span, "attributes") self.assert_hasattr(span, "stack_trace") self.assert_hasattr(span, "annotations") self.assert_hasattr(span, "message_events") self.assert_hasattr(span, "links") self.assert_hasattr(span, "status") self.assert_hasattr(span, "same_process_as_parent_span") self.assert_hasattr(span, "_child_spans") self.assert_hasattr(span, "context_tracer") self.assert_hasattr(span, "span_kind") # methods self.assert_hasattr(span, "on_create") self.assert_hasattr(span, "children") self.assert_hasattr(span, "span") self.assert_hasattr(span, "add_attribute") self.assert_hasattr(span, "add_annotation") self.assert_hasattr(span, "add_message_event") self.assert_hasattr(span, "add_link") self.assert_hasattr(span, "set_status") self.assert_hasattr(span, "start") self.assert_hasattr(span, "finish") self.assert_hasattr(span, "__iter__") self.assert_hasattr(span, "__enter__") self.assert_hasattr(span, "__exit__") def test_add_link_logs_a_warning(self): oc_tracer = OcTracer() with oc_tracer.start_span("foo") as span: with self.assertLogs(level=logging.WARNING): span.add_link(OcLink("1", "1")) def test_set_span_kind_logs_a_warning(self): oc_tracer = OcTracer() with oc_tracer.start_span("foo") as span: with self.assertLogs(level=logging.WARNING): span.span_kind = SpanKind.CLIENT # pylint: disable=no-self-use,no-member,protected-access def test_shim_span_contextmanager_does_not_call_end(self): # This was a bug in the first implementation, where the underlying OTel span.end() was # called again after span.__exit__, double-ending the span.
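# In other words, ShimSpan.__exit__ delegates to the OTel span's own __exit__
# and must not call end() again afterwards: the OTel span has to be ended
# exactly once.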
oc_tracer = OcTracer() oc_span = oc_tracer.start_span("foo") with patch.object( oc_span, "_self_otel_span", wraps=oc_span._self_otel_span, ) as spy_otel_span: with oc_span: pass spy_otel_span.end.assert_not_called() def test_set_oc_span_in_context_no_span_id(self): # This won't create a span ID and is the default behavior if you don't pass a context # when creating the Tracer ctx = set_oc_span_in_context(SpanContext(), context.get_current()) self.assertIs(trace.get_current_span(ctx), trace.INVALID_SPAN) def test_set_oc_span_in_context_ids(self): ctx = set_oc_span_in_context( SpanContext( trace_id="ace0216bab2b7ba249761dbb19c871b7", span_id="1fead89ecf242225", ), context.get_current(), ) span_ctx = trace.get_current_span(ctx).get_span_context() self.assertEqual( trace.format_trace_id(span_ctx.trace_id), "ace0216bab2b7ba249761dbb19c871b7", ) self.assertEqual( trace.format_span_id(span_ctx.span_id), "1fead89ecf242225" ) def test_set_oc_span_in_context_remote(self): for is_from_remote in True, False: ctx = set_oc_span_in_context( SpanContext( trace_id="ace0216bab2b7ba249761dbb19c871b7", span_id="1fead89ecf242225", from_header=is_from_remote, ), context.get_current(), ) span_ctx = trace.get_current_span(ctx).get_span_context() self.assertEqual(span_ctx.is_remote, is_from_remote) def test_set_oc_span_in_context_traceoptions(self): for oc_trace_options, expect in [ # Not sampled ( trace_options.TraceOptions("0"), trace.TraceFlags(trace.TraceFlags.DEFAULT), ), # Sampled ( trace_options.TraceOptions("1"), trace.TraceFlags(trace.TraceFlags.SAMPLED), ), ]: ctx = set_oc_span_in_context( SpanContext( trace_id="ace0216bab2b7ba249761dbb19c871b7", span_id="1fead89ecf242225", trace_options=oc_trace_options, ), context.get_current(), ) span_ctx = trace.get_current_span(ctx).get_span_context() self.assertEqual(span_ctx.trace_flags, expect) def test_set_oc_span_in_context_tracestate(self): ctx = set_oc_span_in_context( SpanContext( trace_id="ace0216bab2b7ba249761dbb19c871b7", span_id="1fead89ecf242225", tracestate=tracestate.Tracestate({"hello": "tracestate"}), ), context.get_current(), ) span_ctx = trace.get_current_span(ctx).get_span_context() self.assertEqual( span_ctx.trace_state, trace.TraceState([("hello", "tracestate")]) ) python-opentelemetry-1.39.1/shim/opentelemetry-opencensus-shim/tests/test_shim_with_sdk.py000066400000000000000000000300601511654350100324010ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
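# These tests exercise the shim end-to-end against the OpenTelemetry SDK: spans
# are exported through a SimpleSpanProcessor into an InMemorySpanExporter so
# each test can assert on the finished ReadableSpan objects.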
import logging import unittest from datetime import datetime from opencensus.trace import execution_context, time_event from opencensus.trace.span_context import SpanContext from opencensus.trace.status import Status as OcStatus from opencensus.trace.tracer import Tracer as OcTracer from opentelemetry import trace from opentelemetry.sdk.trace import ReadableSpan, TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( InMemorySpanExporter, ) from opentelemetry.sdk.trace.sampling import ALWAYS_ON from opentelemetry.shim.opencensus import install_shim, uninstall_shim _TIMESTAMP = datetime.fromisoformat("2023-01-01T00:00:00.000000") class TestShimWithSdk(unittest.TestCase): def setUp(self): uninstall_shim() self.tracer_provider = TracerProvider( sampler=ALWAYS_ON, shutdown_on_exit=False ) self.mem_exporter = InMemorySpanExporter() self.tracer_provider.add_span_processor( SimpleSpanProcessor(self.mem_exporter) ) install_shim(self.tracer_provider) def tearDown(self): uninstall_shim() def test_start_span_interacts_with_context(self): oc_tracer = OcTracer() span = oc_tracer.start_span("foo") # Should have created a real OTel span in implicit context under the hood. OpenCensus # does not require another step to set the span in context. otel_span = trace.get_current_span() self.assertNotEqual(span.span_id, 0) self.assertEqual(span.span_id, otel_span.get_span_context().span_id) # This should end the span and remove it from context oc_tracer.end_span() self.assertIs(trace.get_current_span(), trace.INVALID_SPAN) def test_start_span_interacts_with_oc_context(self): oc_tracer = OcTracer() span = oc_tracer.start_span("foo") # Should have put the shim span in OC's implicit context under the hood. OpenCensus # does not require another step to set the span in context. 
self.assertIs(execution_context.get_current_span(), span) # This should end the span and remove it from context oc_tracer.end_span() self.assertIs(execution_context.get_current_span(), None) def test_context_manager_interacts_with_context(self): oc_tracer = OcTracer() with oc_tracer.start_span("foo") as span: # Should have created a real OTel span in implicit context under the hood otel_span = trace.get_current_span() self.assertNotEqual(span.span_id, 0) self.assertEqual( span.span_id, otel_span.get_span_context().span_id ) # The span should now be popped from context self.assertIs(trace.get_current_span(), trace.INVALID_SPAN) def test_context_manager_interacts_with_oc_context(self): oc_tracer = OcTracer() with oc_tracer.start_span("foo") as span: # Should have placed the shim span in implicit context under the hood self.assertIs(execution_context.get_current_span(), span) # The span should now be popped from context self.assertIs(execution_context.get_current_span(), None) def test_exports_a_span(self): oc_tracer = OcTracer() with oc_tracer.start_span("span1"): pass self.assertEqual(len(self.mem_exporter.get_finished_spans()), 1) def test_uses_tracers_span_context_when_no_parent_in_context(self): # the SpanContext passed to the Tracer will become the parent when there is no span # already set in the OTel context oc_tracer = OcTracer( span_context=SpanContext( trace_id="ace0216bab2b7ba249761dbb19c871b7", span_id="1fead89ecf242225", ) ) with oc_tracer.start_span("span1"): pass exported_span: ReadableSpan = self.mem_exporter.get_finished_spans()[0] parent = exported_span.parent self.assertIsNotNone(parent) self.assertEqual( trace.format_trace_id(parent.trace_id), "ace0216bab2b7ba249761dbb19c871b7", ) self.assertEqual( trace.format_span_id(parent.span_id), "1fead89ecf242225" ) def test_ignores_tracers_span_context_when_parent_already_in_context(self): # the SpanContext passed to the Tracer will be ignored since there is already a span # set in the OTel context oc_tracer = OcTracer( span_context=SpanContext( trace_id="ace0216bab2b7ba249761dbb19c871b7", span_id="1fead89ecf242225", ) ) otel_tracer = self.tracer_provider.get_tracer(__name__) with otel_tracer.start_as_current_span("some_parent"): with oc_tracer.start_span("span1"): pass oc_span: ReadableSpan = self.mem_exporter.get_finished_spans()[0] otel_parent: ReadableSpan = self.mem_exporter.get_finished_spans()[1] self.assertEqual( oc_span.parent, otel_parent.context, ) def test_span_attributes(self): oc_tracer = OcTracer() with oc_tracer.start_span("span1") as span: span.add_attribute("key1", "value1") span.add_attribute("key2", "value2") exported_span: ReadableSpan = self.mem_exporter.get_finished_spans()[0] self.assertDictEqual( dict(exported_span.attributes), {"key1": "value1", "key2": "value2"}, ) def test_span_annotations(self): oc_tracer = OcTracer() with oc_tracer.start_span("span1") as span: span.add_annotation("description", key1="value1", key2="value2") exported_span: ReadableSpan = self.mem_exporter.get_finished_spans()[0] self.assertEqual(len(exported_span.events), 1) event = exported_span.events[0] self.assertEqual(event.name, "description") self.assertDictEqual( dict(event.attributes), {"key1": "value1", "key2": "value2"} ) def test_span_message_event(self): oc_tracer = OcTracer() with oc_tracer.start_span("span1") as span: span.add_message_event( time_event.MessageEvent( _TIMESTAMP, "id_sent", time_event.Type.SENT, "20", "10" ) ) span.add_message_event( time_event.MessageEvent( _TIMESTAMP, "id_received", 
time_event.Type.RECEIVED, "20", "10", ) ) span.add_message_event( time_event.MessageEvent( _TIMESTAMP, "id_unspecified", None, "20", "10", ) ) exported_span: ReadableSpan = self.mem_exporter.get_finished_spans()[0] self.assertEqual(len(exported_span.events), 3) event1, event2, event3 = exported_span.events self.assertEqual(event1.name, "id_sent") self.assertDictEqual( dict(event1.attributes), { "message.event.size.compressed": "10", "message.event.size.uncompressed": "20", "message.event.type": "SENT", }, ) self.assertEqual(event2.name, "id_received") self.assertDictEqual( dict(event2.attributes), { "message.event.size.compressed": "10", "message.event.size.uncompressed": "20", "message.event.type": "RECEIVED", }, ) self.assertEqual(event3.name, "id_unspecified") self.assertDictEqual( dict(event3.attributes), { "message.event.size.compressed": "10", "message.event.size.uncompressed": "20", "message.event.type": "TYPE_UNSPECIFIED", }, ) def test_span_status(self): oc_tracer = OcTracer() with oc_tracer.start_span("span_ok") as span: # OTel will log about the message being set on a not OK span with self.assertLogs(level=logging.WARNING) as rec: span.set_status(OcStatus(0, "message")) self.assertIn( "description should only be set when status_code is set to StatusCode.ERROR", rec.output[0], ) with oc_tracer.start_span("span_exception") as span: span.set_status( OcStatus.from_exception(Exception("exception message")) ) self.assertEqual(len(self.mem_exporter.get_finished_spans()), 2) ok_span: ReadableSpan = self.mem_exporter.get_finished_spans()[0] exc_span: ReadableSpan = self.mem_exporter.get_finished_spans()[1] self.assertTrue(ok_span.status.is_ok) # should be none even though we provided it because OTel drops the description when # status is not ERROR self.assertIsNone(ok_span.status.description) self.assertFalse(exc_span.status.is_ok) self.assertEqual(exc_span.status.description, "exception message") def assert_related(self, *, child: ReadableSpan, parent: ReadableSpan): self.assertEqual( child.parent.span_id, parent.get_span_context().span_id ) def test_otel_sandwich(self): oc_tracer = OcTracer() otel_tracer = self.tracer_provider.get_tracer(__name__) with oc_tracer.start_span("opencensus_outer"): with otel_tracer.start_as_current_span("otel_middle"): with oc_tracer.start_span("opencensus_inner"): pass self.assertEqual(len(self.mem_exporter.get_finished_spans()), 3) opencensus_inner: ReadableSpan = ( self.mem_exporter.get_finished_spans()[0] ) otel_middle: ReadableSpan = self.mem_exporter.get_finished_spans()[1] opencensus_outer: ReadableSpan = ( self.mem_exporter.get_finished_spans()[2] ) self.assertEqual(opencensus_outer.name, "opencensus_outer") self.assertEqual(otel_middle.name, "otel_middle") self.assertEqual(opencensus_inner.name, "opencensus_inner") self.assertIsNone(opencensus_outer.parent) self.assert_related(parent=opencensus_outer, child=otel_middle) self.assert_related(parent=otel_middle, child=opencensus_inner) def test_opencensus_sandwich(self): oc_tracer = OcTracer() otel_tracer = self.tracer_provider.get_tracer(__name__) with otel_tracer.start_as_current_span("otel_outer"): with oc_tracer.start_span("opencensus_middle"): with otel_tracer.start_as_current_span("otel_inner"): pass self.assertEqual(len(self.mem_exporter.get_finished_spans()), 3) otel_inner: ReadableSpan = self.mem_exporter.get_finished_spans()[0] opencensus_middle: ReadableSpan = ( self.mem_exporter.get_finished_spans()[1] ) otel_outer: ReadableSpan = self.mem_exporter.get_finished_spans()[2] 
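# Note the exporter sees spans innermost-first: inner spans end (and are
# exported by the SimpleSpanProcessor) before the outer ones.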
self.assertEqual(otel_outer.name, "otel_outer") self.assertEqual(opencensus_middle.name, "opencensus_middle") self.assertEqual(otel_inner.name, "otel_inner") self.assertIsNone(otel_outer.parent) self.assert_related(parent=otel_outer, child=opencensus_middle) self.assert_related(parent=opencensus_middle, child=otel_inner) python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/000077500000000000000000000000001511654350100251225ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/LICENSE000066400000000000000000000261351511654350100261360ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/README.rst000066400000000000000000000010141511654350100266050ustar00rootroot00000000000000OpenTracing Shim for OpenTelemetry ================================== |pypi| .. |pypi| image:: https://badge.fury.io/py/opentelemetry-opentracing-shim.svg :target: https://pypi.org/project/opentelemetry-opentracing-shim/ Installation ------------ :: pip install opentelemetry-opentracing-shim References ---------- * `OpenTracing Shim for OpenTelemetry `_ * `OpenTelemetry Project `_ python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/pyproject.toml000066400000000000000000000025071511654350100300420ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-opentracing-shim" dynamic = ["version"] description = "OpenTracing Shim for OpenTelemetry" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 4 - Beta", "Framework :: OpenTelemetry", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Typing :: Typed", ] dependencies = [ "typing-extensions >= 4.5.0", "opentelemetry-api ~= 1.3", "opentracing ~= 2.0", ] [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tree/main/shim/opentelemetry-opentracing-shim" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/shim/opentracing_shim/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", "/tests", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] 
python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/src/000077500000000000000000000000001511654350100257115ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/src/opentelemetry/000077500000000000000000000000001511654350100306055ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/src/opentelemetry/shim/000077500000000000000000000000001511654350100315455ustar00rootroot00000000000000opentracing_shim/000077500000000000000000000000001511654350100350175ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/src/opentelemetry/shim__init__.py000066400000000000000000000707271511654350100371450ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/src/opentelemetry/shim/opentracing_shim# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The OpenTelemetry OpenTracing shim is a library which allows an easy migration from OpenTracing to OpenTelemetry. The shim consists of a set of classes which implement the OpenTracing Python API while using OpenTelemetry constructs behind the scenes. Its purpose is to allow applications which are already instrumented using OpenTracing to start using OpenTelemetry with a minimal effort, without having to rewrite large portions of the codebase. To use the shim, a :class:`TracerShim` instance is created and then used as if it were an "ordinary" OpenTracing :class:`opentracing.Tracer`, as in the following example:: import time from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.shim.opentracing_shim import create_tracer # Define which OpenTelemetry Tracer provider implementation to use. trace.set_tracer_provider(TracerProvider()) # Create an OpenTelemetry Tracer. otel_tracer = trace.get_tracer(__name__) # Create an OpenTracing shim. shim = create_tracer(otel_tracer) with shim.start_active_span("ProcessHTTPRequest"): print("Processing HTTP request") # Sleeping to mock real work. time.sleep(0.1) with shim.start_active_span("GetDataFromDB"): print("Getting data from DB") # Sleeping to mock real work. time.sleep(0.2) Note: While the OpenTracing Python API represents time values as the number of **seconds** since the epoch expressed as :obj:`float` values, the OpenTelemetry Python API represents time values as the number of **nanoseconds** since the epoch expressed as :obj:`int` values. This fact requires the OpenTracing shim to convert time values back and forth between the two representations, which involves floating point arithmetic. Due to the way computers represent floating point values in hardware, representation of decimal floating point values in binary-based hardware is imprecise by definition. 
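For example, the decimal value 0.1 cannot be represented exactly as a binary floating point number.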
The above results in **slight imprecisions** in time values passed to the shim via the OpenTracing API when comparing the value passed to the shim and the value stored in the OpenTelemetry :class:`opentelemetry.trace.Span` object behind the scenes. **This is not a bug in this library or in Python**. Rather, this is a generic problem which stems from the fact that not every decimal floating point number can be correctly represented in binary, and therefore affects other libraries and programming languages as well. More information about this problem can be found in the `Floating Point Arithmetic\\: Issues and Limitations`_ section of the Python documentation. While testing this library, the aforementioned imprecisions were observed to be of *less than a microsecond*. API --- .. _Floating Point Arithmetic\\: Issues and Limitations: https://docs.python.org/3/tutorial/floatingpoint.html """ # TODO: make pylint use 3p opentracing module for type inference # pylint:disable=no-member from __future__ import annotations import logging from types import TracebackType from typing import Type, TypeVar from opentracing import ( Format, Scope, ScopeManager, Span, SpanContext, Tracer, UnsupportedFormatException, ) from typing_extensions import deprecated from opentelemetry.baggage import get_baggage, set_baggage from opentelemetry.context import ( Context, attach, create_key, detach, get_value, set_value, ) from opentelemetry.propagate import get_global_textmap from opentelemetry.shim.opentracing_shim import util from opentelemetry.shim.opentracing_shim.version import __version__ from opentelemetry.trace import ( INVALID_SPAN_CONTEXT, Link, NonRecordingSpan, TracerProvider, get_current_span, set_span_in_context, use_span, ) from opentelemetry.trace import SpanContext as OtelSpanContext from opentelemetry.trace import Tracer as OtelTracer from opentelemetry.util.types import Attributes ValueT = TypeVar("ValueT", int, float, bool, str) logger = logging.getLogger(__name__) _SHIM_KEY = create_key("scope_shim") def create_tracer(otel_tracer_provider: TracerProvider) -> "TracerShim": """Creates a :class:`TracerShim` object from the provided OpenTelemetry :class:`opentelemetry.trace.TracerProvider`. The returned :class:`TracerShim` is an implementation of :class:`opentracing.Tracer` using OpenTelemetry under the hood. Args: otel_tracer_provider: A tracer from this provider will be used to perform the actual tracing when user code is instrumented using the OpenTracing API. Returns: The created :class:`TracerShim`. """ return TracerShim(otel_tracer_provider.get_tracer(__name__, __version__)) class SpanContextShim(SpanContext): """Implements :class:`opentracing.SpanContext` by wrapping a :class:`opentelemetry.trace.SpanContext` object. Args: otel_context: A :class:`opentelemetry.trace.SpanContext` to be used for constructing the :class:`SpanContextShim`. """ def __init__(self, otel_context: OtelSpanContext): self._otel_context = otel_context # Context is being used here since it must be immutable. self._baggage = Context() def unwrap(self) -> OtelSpanContext: """Returns the wrapped :class:`opentelemetry.trace.SpanContext` object. Returns: The :class:`opentelemetry.trace.SpanContext` object wrapped by this :class:`SpanContextShim`. """ return self._otel_context @property def baggage(self) -> Context: """Returns the ``baggage`` associated with this object""" return self._baggage class SpanShim(Span): """Wraps a :class:`opentelemetry.trace.Span` object. 
Args: tracer: The :class:`opentracing.Tracer` that created this `SpanShim`. context: A :class:`SpanContextShim` which contains the context for this :class:`SpanShim`. span: A :class:`opentelemetry.trace.Span` to wrap. """ def __init__(self, tracer, context: SpanContextShim, span): super().__init__(tracer, context) self._otel_span = span def unwrap(self): """Returns the wrapped :class:`opentelemetry.trace.Span` object. Returns: The :class:`opentelemetry.trace.Span` object wrapped by this :class:`SpanShim`. """ return self._otel_span def set_operation_name(self, operation_name: str) -> "SpanShim": """Updates the name of the wrapped OpenTelemetry span. Args: operation_name: The new name to be used for the underlying :class:`opentelemetry.trace.Span` object. Returns: Returns this :class:`SpanShim` instance to allow call chaining. """ self._otel_span.update_name(operation_name) return self def finish(self, finish_time: float | None = None): """Ends the OpenTelemetry span wrapped by this :class:`SpanShim`. If *finish_time* is provided, the time value is converted to the OpenTelemetry time format (number of nanoseconds since the epoch, expressed as an integer) and passed on to the OpenTelemetry tracer when ending the OpenTelemetry span. If *finish_time* isn't provided, it is up to the OpenTelemetry tracer implementation to generate a timestamp when ending the span. Args: finish_time: A value that represents the finish time expressed as the number of seconds since the epoch as returned by :func:`time.time()`. """ end_time = finish_time if end_time is not None: end_time = util.time_seconds_to_ns(finish_time) self._otel_span.end(end_time=end_time) def set_tag(self, key: str, value: ValueT) -> "SpanShim": """Sets an OpenTelemetry attribute on the wrapped OpenTelemetry span. Args: key: A tag key. value: A tag value. Returns: Returns this :class:`SpanShim` instance to allow call chaining. """ self._otel_span.set_attribute(key, value) return self def log_kv( self, key_values: Attributes, timestamp: float | None = None ) -> "SpanShim": """Logs an event for the wrapped OpenTelemetry span. Note: The OpenTracing API defines the values of *key_values* to be of any type. However, the OpenTelemetry API requires that the values be any one of the types defined in ``opentelemetry.trace.util.Attributes`` therefore, only these types are supported as values. Args: key_values: A dictionary as specified in ``opentelemetry.trace.util.Attributes``. timestamp: Timestamp of the OpenTelemetry event, will be generated automatically if omitted. Returns: Returns this :class:`SpanShim` instance to allow call chaining. """ if timestamp is not None: event_timestamp = util.time_seconds_to_ns(timestamp) else: event_timestamp = None event_name = util.event_name_from_kv(key_values) self._otel_span.add_event(event_name, key_values, event_timestamp) return self @deprecated("This method is deprecated in favor of log_kv") def log(self, **kwargs): super().log(**kwargs) @deprecated("This method is deprecated in favor of log_kv") def log_event(self, event, payload=None): super().log_event(event, payload=payload) def set_baggage_item(self, key: str, value: str): """Stores a Baggage item in the span as a key/value pair. Args: key: A tag key. value: A tag value. """ # pylint: disable=protected-access self._context._baggage = set_baggage( key, value, context=self._context._baggage ) def get_baggage_item(self, key: str) -> object | None: """Retrieves value of the baggage item with the given key. Args: key: A tag key. 
Returns: Returns this :class:`SpanShim` instance to allow call chaining. """ # pylint: disable=protected-access return get_baggage(key, context=self._context._baggage) class ScopeShim(Scope): """A `ScopeShim` wraps the OpenTelemetry functionality related to span activation/deactivation while using OpenTracing :class:`opentracing.Scope` objects for presentation. Unlike other classes in this package, the `ScopeShim` class doesn't wrap an OpenTelemetry class because OpenTelemetry doesn't have the notion of "scope" (though it *does* have similar functionality). There are two ways to construct a `ScopeShim` object: using the default initializer and using the :meth:`from_context_manager()` class method. It is necessary to have both ways for constructing `ScopeShim` objects because in some cases we need to create the object from an OpenTelemetry `opentelemetry.trace.Span` context manager (as returned by :meth:`opentelemetry.trace.use_span`), in which case our only way of retrieving a `opentelemetry.trace.Span` object is by calling the ``__enter__()`` method on the context manager, which makes the span active in the OpenTelemetry tracer; whereas in other cases we need to accept a `SpanShim` object and wrap it in a `ScopeShim`. The former is used mainly when the instrumentation code retrieves the currently-active span using `ScopeManagerShim.active`. The latter is mainly used when the instrumentation code activates a span using :meth:`ScopeManagerShim.activate`. Args: manager: The :class:`ScopeManagerShim` that created this :class:`ScopeShim`. span: The :class:`SpanShim` this :class:`ScopeShim` controls. span_cm: A Python context manager which yields an OpenTelemetry `opentelemetry.trace.Span` from its ``__enter__()`` method. Used by :meth:`from_context_manager` to store the context manager as an attribute so that it can later be closed by calling its ``__exit__()`` method. Defaults to `None`. """ def __init__( self, manager: "ScopeManagerShim", span: SpanShim, span_cm=None ): super().__init__(manager, span) self._span_cm = span_cm self._token = attach(set_value(_SHIM_KEY, self)) # TODO: Change type of `manager` argument to `opentracing.ScopeManager`? We # need to get rid of `manager.tracer` for this. @classmethod def from_context_manager(cls, manager: "ScopeManagerShim", span_cm): """Constructs a :class:`ScopeShim` from an OpenTelemetry `opentelemetry.trace.Span` context manager. The method extracts a `opentelemetry.trace.Span` object from the context manager by calling the context manager's ``__enter__()`` method. This causes the span to start in the OpenTelemetry tracer. Example usage:: span = otel_tracer.start_span("TestSpan") span_cm = opentelemetry.trace.use_span(span) scope_shim = ScopeShim.from_context_manager( scope_manager_shim, span_cm=span_cm, ) Args: manager: The :class:`ScopeManagerShim` that created this :class:`ScopeShim`. span_cm: A context manager as returned by :meth:`opentelemetry.trace.use_span`. """ # pylint: disable=unnecessary-dunder-call otel_span = span_cm.__enter__() span_context = SpanContextShim(otel_span.get_span_context()) span = SpanShim(manager.tracer, span_context, otel_span) return cls(manager, span, span_cm) def close(self): """Closes the `ScopeShim`. If the `ScopeShim` was created from a context manager, calling this method sets the active span in the OpenTelemetry tracer back to the span which was active before this `ScopeShim` was created. 
In addition, if the span represented by this `ScopeShim` was activated with the *finish_on_close* argument set to `True`, calling this method will end the span. Warning: In the current state of the implementation it is possible to create a `ScopeShim` directly from a `SpanShim`, that is - without using :meth:`from_context_manager()`. For that reason we need to be able to end the span represented by the `ScopeShim` in this case, too. Please note that closing a `ScopeShim` created this way (for example as returned by :meth:`ScopeManagerShim.active`) **always ends the associated span**, regardless of the value passed in *finish_on_close* when activating the span. """ self._end_span_scope(None, None, None) def __exit__(self, exc_type, exc_val, exc_tb): """ Override the __exit__ method of `opentracing.scope.Scope` so we can report exceptions correctly in opentelemetry specification format. """ self._end_span_scope(exc_type, exc_val, exc_tb) def _end_span_scope( self, exc_type: Type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> None: detach(self._token) if self._span_cm is not None: self._span_cm.__exit__(exc_type, exc_val, exc_tb) else: self._span.unwrap().end() class ScopeManagerShim(ScopeManager): """Implements :class:`opentracing.ScopeManager` by setting and getting the active `opentelemetry.trace.Span` in the OpenTelemetry tracer. This class keeps a reference to a :class:`TracerShim` as an attribute. This reference is used to communicate with the OpenTelemetry tracer. It is necessary to have a reference to the :class:`TracerShim` rather than the :class:`opentelemetry.trace.Tracer` wrapped by it because when constructing a :class:`SpanShim` we need to pass a reference to a :class:`opentracing.Tracer`. Args: tracer: A :class:`TracerShim` to use for setting and getting active span state. """ def __init__(self, tracer: "TracerShim"): # The only thing the ``__init__()``` method on the base class does is # initialize `self._noop_span` and `self._noop_scope` with no-op # objects. Therefore, it doesn't seem useful to call it. # pylint: disable=super-init-not-called self._tracer = tracer def activate(self, span: SpanShim, finish_on_close: bool) -> "ScopeShim": """Activates a :class:`SpanShim` and returns a :class:`ScopeShim` which represents the active span. Args: span: A :class:`SpanShim` to be activated. finish_on_close(:obj:`bool`): Determines whether the OpenTelemetry span should be ended when the returned :class:`ScopeShim` is closed. Returns: A :class:`ScopeShim` representing the activated span. """ span_cm = use_span(span.unwrap(), end_on_exit=finish_on_close) return ScopeShim.from_context_manager(self, span_cm=span_cm) @property def active(self) -> "ScopeShim": """Returns a :class:`ScopeShim` object representing the currently-active span in the OpenTelemetry tracer. Returns: A :class:`ScopeShim` representing the active span in the OpenTelemetry tracer, or `None` if no span is currently active. Warning: Calling :meth:`ScopeShim.close` on the :class:`ScopeShim` returned by this property **always ends the corresponding span**, regardless of the *finish_on_close* value used when activating the span. This is a limitation of the current implementation of the OpenTracing shim and is likely to be handled in future versions. 
""" span = get_current_span() if span.get_span_context() == INVALID_SPAN_CONTEXT: return None try: return get_value(_SHIM_KEY) except KeyError: span_context = SpanContextShim(span.get_span_context()) wrapped_span = SpanShim(self._tracer, span_context, span) return ScopeShim(self, span=wrapped_span) @property def tracer(self) -> "TracerShim": """Returns the :class:`TracerShim` reference used by this :class:`ScopeManagerShim` for setting and getting the active span from the OpenTelemetry tracer. Returns: The :class:`TracerShim` used for setting and getting the active span. Warning: This property is *not* a part of the OpenTracing API. It is used internally by the current implementation of the OpenTracing shim and will likely be removed in future versions. """ return self._tracer class TracerShim(Tracer): """Wraps a :class:`opentelemetry.trace.Tracer` object. This wrapper class allows using an OpenTelemetry tracer as if it were an OpenTracing tracer. It exposes the same methods as an "ordinary" OpenTracing tracer, and uses OpenTelemetry transparently for performing the actual tracing. This class depends on the *OpenTelemetry API*. Therefore, any implementation of a :class:`opentelemetry.trace.Tracer` should work with this class. Args: tracer: A :class:`opentelemetry.trace.Tracer` to use for tracing. This tracer will be invoked by the shim to create actual spans. """ def __init__(self, tracer: OtelTracer): super().__init__(scope_manager=ScopeManagerShim(self)) self._otel_tracer = tracer self._supported_formats = ( Format.TEXT_MAP, Format.HTTP_HEADERS, ) def unwrap(self): """Returns the :class:`opentelemetry.trace.Tracer` object that is wrapped by this :class:`TracerShim` and used for actual tracing. Returns: The :class:`opentelemetry.trace.Tracer` used for actual tracing. """ return self._otel_tracer def start_active_span( self, operation_name: str, child_of: SpanShim | SpanContextShim | None = None, references: list | None = None, tags: Attributes = None, start_time: float | None = None, ignore_active_span: bool = False, finish_on_close: bool = True, ) -> "ScopeShim": """Starts and activates a span. In terms of functionality, this method behaves exactly like the same method on a "regular" OpenTracing tracer. See :meth:`opentracing.Tracer.start_active_span` for more details. Args: operation_name: Name of the operation represented by the new span from the perspective of the current service. child_of: A :class:`SpanShim` or :class:`SpanContextShim` representing the parent in a "child of" reference. If specified, the *references* parameter must be omitted. references: A list of :class:`opentracing.Reference` objects that identify one or more parents of type :class:`SpanContextShim`. tags: A dictionary of tags. start_time: An explicit start time expressed as the number of seconds since the epoch as returned by :func:`time.time()`. ignore_active_span: Ignore the currently-active span in the OpenTelemetry tracer and make the created span the root span of a new trace. finish_on_close: Determines whether the created span should end automatically when closing the returned :class:`ScopeShim`. Returns: A :class:`ScopeShim` that is already activated by the :class:`ScopeManagerShim`. 
""" current_span = get_current_span() if ( child_of is None and current_span.get_span_context() is not INVALID_SPAN_CONTEXT ): child_of = SpanShim(None, None, current_span) span = self.start_span( operation_name=operation_name, child_of=child_of, references=references, tags=tags, start_time=start_time, ignore_active_span=ignore_active_span, ) return self._scope_manager.activate(span, finish_on_close) def start_span( self, operation_name: str | None = None, child_of: SpanShim | SpanContextShim | None = None, references: list | None = None, tags: Attributes = None, start_time: float | None = None, ignore_active_span: bool = False, ) -> SpanShim: """Implements the ``start_span()`` method from the base class. Starts a span. In terms of functionality, this method behaves exactly like the same method on a "regular" OpenTracing tracer. See :meth:`opentracing.Tracer.start_span` for more details. Args: operation_name: Name of the operation represented by the new span from the perspective of the current service. child_of: A :class:`SpanShim` or :class:`SpanContextShim` representing the parent in a "child of" reference. If specified, the *references* parameter must be omitted. references: A list of :class:`opentracing.Reference` objects that identify one or more parents of type :class:`SpanContextShim`. tags: A dictionary of tags. start_time: An explicit start time expressed as the number of seconds since the epoch as returned by :func:`time.time()`. ignore_active_span: Ignore the currently-active span in the OpenTelemetry tracer and make the created span the root span of a new trace. Returns: An already-started :class:`SpanShim` instance. """ # Use active span as parent when no explicit parent is specified. if not ignore_active_span and not child_of: child_of = self.active_span # Use the specified parent or the active span if possible. Otherwise, # use a `None` parent, which triggers the creation of a new trace. parent = child_of.unwrap() if child_of else None if isinstance(parent, OtelSpanContext): parent = NonRecordingSpan(parent) valid_links = [] if references: for ref in references: if ref.referenced_context.unwrap() is not INVALID_SPAN_CONTEXT: valid_links.append(Link(ref.referenced_context.unwrap())) if valid_links and parent is None: parent = NonRecordingSpan(valid_links[0].context) parent_span_context = set_span_in_context(parent) # The OpenTracing API expects time values to be `float` values which # represent the number of seconds since the epoch. OpenTelemetry # represents time values as nanoseconds since the epoch. start_time_ns = start_time if start_time_ns is not None: start_time_ns = util.time_seconds_to_ns(start_time) span = self._otel_tracer.start_span( operation_name, context=parent_span_context, links=valid_links, attributes=tags, start_time=start_time_ns, ) context = SpanContextShim(span.get_span_context()) return SpanShim(self, context, span) def inject(self, span_context, format: object, carrier: object): """Injects ``span_context`` into ``carrier``. See base class for more details. Args: span_context: The ``opentracing.SpanContext`` to inject. format: a Python object instance that represents a given carrier format. `format` may be of any type, and `format` equality is defined by Python ``==`` operator. carrier: the format-specific carrier object to inject into """ # pylint: disable=redefined-builtin # This implementation does not perform the injecting by itself but # uses the configured propagators in opentelemetry.propagators. 
# TODO: Support Format.BINARY once it is supported in # opentelemetry-python. if format not in self._supported_formats: raise UnsupportedFormatException propagator = get_global_textmap() span = span_context.unwrap() if span_context else None if isinstance(span, OtelSpanContext): span = NonRecordingSpan(span) ctx = set_span_in_context(span) propagator.inject(carrier, context=ctx) def extract(self, format: object, carrier: object): """Returns an ``opentracing.SpanContext`` instance extracted from a ``carrier``. See base class for more details. Args: format: a Python object instance that represents a given carrier format. ``format`` may be of any type, and ``format`` equality is defined by python ``==`` operator. carrier: the format-specific carrier object to extract from Returns: An ``opentracing.SpanContext`` extracted from ``carrier`` or ``None`` if no such ``SpanContext`` could be found. """ # pylint: disable=redefined-builtin # This implementation does not perform the extracting by itself but # uses the configured propagators in opentelemetry.propagators. # TODO: Support Format.BINARY once it is supported in # opentelemetry-python. if format not in self._supported_formats: raise UnsupportedFormatException propagator = get_global_textmap() ctx = propagator.extract(carrier) span = get_current_span(ctx) if span is not None: otel_context = span.get_span_context() else: otel_context = INVALID_SPAN_CONTEXT return SpanContextShim(otel_context) py.typed000066400000000000000000000000001511654350100365040ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/src/opentelemetry/shim/opentracing_shimutil.py000066400000000000000000000033131511654350100363460ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/src/opentelemetry/shim/opentracing_shim# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A default event name to be used for logging events when a better event name # can't be derived from the event's key-value pairs. DEFAULT_EVENT_NAME = "log" def time_seconds_to_ns(time_seconds): """Converts a time value in seconds to a time value in nanoseconds. `time_seconds` is a `float` as returned by `time.time()` which represents the number of seconds since the epoch. The returned value is an `int` representing the number of nanoseconds since the epoch. """ return int(time_seconds * 1e9) def time_seconds_from_ns(time_nanoseconds): """Converts a time value in nanoseconds to a time value in seconds. `time_nanoseconds` is an `int` representing the number of nanoseconds since the epoch. The returned value is a `float` representing the number of seconds since the epoch. """ return time_nanoseconds / 1e9 def event_name_from_kv(key_values): """A helper function which returns an event name from the given dict, or a default event name. 
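A quick sketch of the behavior, derived from the implementation below::

    event_name_from_kv({"event": "timeout", "error": True})  # -> "timeout"
    event_name_from_kv({"error": True})  # -> DEFAULT_EVENT_NAME, i.e. "log"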
""" if key_values is None or "event" not in key_values: return DEFAULT_EVENT_NAME return key_values["event"] version/000077500000000000000000000000001511654350100365045ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/src/opentelemetry/shim/opentracing_shim__init__.py000066400000000000000000000011401511654350100406110ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/src/opentelemetry/shim/opentracing_shim/version# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.60b1" python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/test-requirements.txt000066400000000000000000000005471511654350100313710ustar00rootroot00000000000000asgiref==3.7.2 importlib-metadata==6.11.0 iniconfig==2.0.0 opentracing==2.4.0 packaging==24.0 pluggy==1.5.0 py-cpuinfo==9.0.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.10.0 wrapt==1.16.0 zipp==3.19.2 -e opentelemetry-api -e opentelemetry-sdk -e tests/opentelemetry-test-utils -e opentelemetry-semantic-conventions -e shim/opentelemetry-opentracing-shim python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/000077500000000000000000000000001511654350100262645ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/__init__.py000066400000000000000000000000001511654350100303630ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/test_shim.py000066400000000000000000000576441511654350100306550ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# TODO: make pylint use 3p opentracing module for type inference # pylint:disable=no-member import time import traceback from unittest import TestCase from unittest.mock import Mock import opentracing from opentelemetry import trace from opentelemetry.propagate import get_global_textmap, set_global_textmap from opentelemetry.sdk.trace import TracerProvider from opentelemetry.shim.opentracing_shim import ( SpanContextShim, SpanShim, create_tracer, util, ) from opentelemetry.test.mock_textmap import ( MockTextMapPropagator, NOOPTextMapPropagator, ) class TestShim(TestCase): # pylint: disable=too-many-public-methods def setUp(self): """Create an OpenTelemetry tracer and a shim before every test case.""" trace.set_tracer_provider(TracerProvider()) self.shim = create_tracer(trace.get_tracer_provider()) @classmethod def setUpClass(cls): # Save current propagator to be restored on teardown. cls._previous_propagator = get_global_textmap() # Set mock propagator for testing. set_global_textmap(MockTextMapPropagator()) @classmethod def tearDownClass(cls): # Restore previous propagator. set_global_textmap(cls._previous_propagator) def test_shim_type(self): # Verify shim is an OpenTracing tracer. self.assertIsInstance(self.shim, opentracing.Tracer) def test_start_active_span(self): """Test span creation and activation using `start_active_span()`.""" with self.shim.start_active_span("TestSpan0") as scope: # Verify correct type of Scope and Span objects. self.assertIsInstance(scope, opentracing.Scope) self.assertIsInstance(scope.span, opentracing.Span) # Verify span is started. self.assertIsNotNone(scope.span.unwrap().start_time) # Verify span is active. self.assertEqual( self.shim.active_span.context.unwrap(), scope.span.context.unwrap(), ) # TODO: We can't check for equality of self.shim.active_span and # scope.span because the same OpenTelemetry span is returned inside # different SpanShim objects. A possible solution is described # here: # https://github.com/open-telemetry/opentelemetry-python/issues/161#issuecomment-534136274 # Verify span has ended. self.assertIsNotNone(scope.span.unwrap().end_time) # Verify no span is active. self.assertIsNone(self.shim.active_span) def test_start_span(self): """Test span creation using `start_span()`.""" with self.shim.start_span("TestSpan1") as span: # Verify correct type of Span object. self.assertIsInstance(span, opentracing.Span) # Verify span is started. self.assertIsNotNone(span.unwrap().start_time) # Verify `start_span()` does NOT make the span active. self.assertIsNone(self.shim.active_span) # Verify span has ended. self.assertIsNotNone(span.unwrap().end_time) def test_start_span_no_contextmanager(self): """Test `start_span()` without a `with` statement.""" span = self.shim.start_span("TestSpan2") # Verify span is started. self.assertIsNotNone(span.unwrap().start_time) # Verify `start_span()` does NOT make the span active. self.assertIsNone(self.shim.active_span) span.finish() def test_explicit_span_finish(self): """Test `finish()` method on `Span` objects.""" span = self.shim.start_span("TestSpan3") # Verify span hasn't ended. self.assertIsNone(span.unwrap().end_time) span.finish() # Verify span has ended. self.assertIsNotNone(span.unwrap().end_time) def test_explicit_start_time(self): """Test `start_time` argument.""" now = time.time() with self.shim.start_active_span("TestSpan4", start_time=now) as scope: result = util.time_seconds_from_ns(scope.span.unwrap().start_time) # Tolerate inaccuracies of less than a microsecond. 
See Note: # https://open-telemetry.github.io/opentelemetry-python/opentelemetry.shim.opentracing_shim.html # TODO: This seems to work consistently, but we should find out the # biggest possible loss of precision. self.assertAlmostEqual(result, now, places=6) def test_explicit_end_time(self): """Test `end_time` argument of `finish()` method.""" span = self.shim.start_span("TestSpan5") now = time.time() span.finish(now) end_time = util.time_seconds_from_ns(span.unwrap().end_time) # Tolerate inaccuracies of less than a microsecond. See Note: # https://open-telemetry.github.io/opentelemetry-python/opentelemetry.shim.opentracing_shim.html # TODO: This seems to work consistently, but we should find out the # biggest possible loss of precision. self.assertAlmostEqual(end_time, now, places=6) def test_explicit_span_activation(self): """Test manual activation and deactivation of a span.""" span = self.shim.start_span("TestSpan6") # Verify no span is currently active. self.assertIsNone(self.shim.active_span) with self.shim.scope_manager.activate( span, finish_on_close=True ) as scope: # Verify span is active. self.assertEqual( self.shim.active_span.context.unwrap(), scope.span.context.unwrap(), ) # Verify no span is active. self.assertIsNone(self.shim.active_span) def test_start_active_span_finish_on_close(self): """Test `finish_on_close` argument of `start_active_span()`.""" with self.shim.start_active_span( "TestSpan7", finish_on_close=True ) as scope: # Verify span hasn't ended. self.assertIsNone(scope.span.unwrap().end_time) # Verify span has ended. self.assertIsNotNone(scope.span.unwrap().end_time) with self.shim.start_active_span( "TestSpan8", finish_on_close=False ) as scope: # Verify span hasn't ended. self.assertIsNone(scope.span.unwrap().end_time) # Verify span hasn't ended after scope had been closed. self.assertIsNone(scope.span.unwrap().end_time) scope.span.finish() def test_activate_finish_on_close(self): """Test `finish_on_close` argument of `activate()`.""" span = self.shim.start_span("TestSpan9") with self.shim.scope_manager.activate( span, finish_on_close=True ) as scope: # Verify span is active. self.assertEqual( self.shim.active_span.context.unwrap(), scope.span.context.unwrap(), ) # Verify span has ended. self.assertIsNotNone(span.unwrap().end_time) span = self.shim.start_span("TestSpan10") with self.shim.scope_manager.activate( span, finish_on_close=False ) as scope: # Verify span is active. self.assertEqual( self.shim.active_span.context.unwrap(), scope.span.context.unwrap(), ) # Verify span hasn't ended. self.assertIsNone(span.unwrap().end_time) span.finish() def test_explicit_scope_close(self): """Test `close()` method on `ScopeShim`.""" with self.shim.start_active_span("ParentSpan") as parent: # Verify parent span is active. self.assertEqual( self.shim.active_span.context.unwrap(), parent.span.context.unwrap(), ) child = self.shim.start_active_span("ChildSpan") # Verify child span is active. self.assertEqual( self.shim.active_span.context.unwrap(), child.span.context.unwrap(), ) # Verify child span hasn't ended. self.assertIsNone(child.span.unwrap().end_time) child.close() # Verify child span has ended. self.assertIsNotNone(child.span.unwrap().end_time) # Verify parent span becomes active again. self.assertEqual( self.shim.active_span.context.unwrap(), parent.span.context.unwrap(), ) def test_parent_child_implicit(self): """Test parent-child relationship and activation/deactivation of spans without specifying the parent span upon creation. 
""" with self.shim.start_active_span("ParentSpan") as parent: # Verify parent span is the active span. self.assertEqual( self.shim.active_span.context.unwrap(), parent.span.context.unwrap(), ) with self.shim.start_active_span("ChildSpan") as child: # Verify child span is the active span. self.assertEqual( self.shim.active_span.context.unwrap(), child.span.context.unwrap(), ) # Verify parent-child relationship. parent_trace_id = ( parent.span.unwrap().get_span_context().trace_id ) child_trace_id = ( child.span.unwrap().get_span_context().trace_id ) self.assertEqual(parent_trace_id, child_trace_id) self.assertEqual( child.span.unwrap().parent, parent.span.unwrap().get_span_context(), ) # Verify parent span becomes the active span again. self.assertEqual( self.shim.active_span.context.unwrap(), parent.span.context.unwrap(), # TODO: Check equality of the spans themselves rather than # their context once the SpanShim reconstruction problem has # been addressed (see previous TODO). ) # Verify there is no active span. self.assertIsNone(self.shim.active_span) def test_parent_child_explicit_span(self): """Test parent-child relationship of spans when specifying a `Span` object as a parent upon creation. """ with self.shim.start_span("ParentSpan") as parent: with self.shim.start_active_span( "ChildSpan", child_of=parent ) as child: parent_trace_id = parent.unwrap().get_span_context().trace_id child_trace_id = ( child.span.unwrap().get_span_context().trace_id ) self.assertEqual(child_trace_id, parent_trace_id) self.assertEqual( child.span.unwrap().parent, parent.unwrap().get_span_context(), ) with self.shim.start_span("ParentSpan") as parent: child = self.shim.start_span("ChildSpan", child_of=parent) parent_trace_id = parent.unwrap().get_span_context().trace_id child_trace_id = child.unwrap().get_span_context().trace_id self.assertEqual(child_trace_id, parent_trace_id) self.assertEqual( child.unwrap().parent, parent.unwrap().get_span_context() ) child.finish() def test_parent_child_explicit_span_context(self): """Test parent-child relationship of spans when specifying a `SpanContext` object as a parent upon creation. 
""" with self.shim.start_span("ParentSpan") as parent: with self.shim.start_active_span( "ChildSpan", child_of=parent.context ) as child: parent_trace_id = parent.unwrap().get_span_context().trace_id child_trace_id = ( child.span.unwrap().get_span_context().trace_id ) self.assertEqual(child_trace_id, parent_trace_id) self.assertEqual( child.span.unwrap().parent, parent.context.unwrap() ) with self.shim.start_span("ParentSpan") as parent: with self.shim.start_span( "SpanWithContextParent", child_of=parent.context ) as child: parent_trace_id = parent.unwrap().get_span_context().trace_id child_trace_id = child.unwrap().get_span_context().trace_id self.assertEqual(child_trace_id, parent_trace_id) self.assertEqual( child.unwrap().parent, parent.context.unwrap() ) def test_references(self): """Test span creation using the `references` argument.""" with self.shim.start_span("ParentSpan") as parent: ref = opentracing.child_of(parent.context) with self.shim.start_active_span( "ChildSpan", references=[ref] ) as child: self.assertEqual( child.span.unwrap().links[0].context, parent.context.unwrap(), ) def test_follows_from_references(self): """Test span creation using the `references` argument with a follows from relationship.""" with self.shim.start_span("ParentSpan") as parent: ref = opentracing.follows_from(parent.context) with self.shim.start_active_span( "FollowingSpan", references=[ref] ) as child: self.assertEqual( child.span.unwrap().links[0].context, parent.context.unwrap(), ) self.assertEqual( child.span.unwrap().parent, parent.context.unwrap(), ) def test_set_operation_name(self): """Test `set_operation_name()` method.""" with self.shim.start_active_span("TestName") as scope: self.assertEqual(scope.span.unwrap().name, "TestName") scope.span.set_operation_name("NewName") self.assertEqual(scope.span.unwrap().name, "NewName") def test_tags(self): """Test tags behavior using the `tags` argument and the `set_tags()` method. """ tags = {"foo": "bar"} with self.shim.start_active_span("TestSetTag", tags=tags) as scope: scope.span.set_tag("baz", "qux") self.assertEqual(scope.span.unwrap().attributes["foo"], "bar") self.assertEqual(scope.span.unwrap().attributes["baz"], "qux") def test_span_tracer(self): """Test the `tracer` property on `Span` objects.""" with self.shim.start_active_span("TestSpan11") as scope: self.assertEqual(scope.span.tracer, self.shim) def test_log_kv(self): """Test the `log_kv()` method on `Span` objects.""" with self.shim.start_span("TestSpan12") as span: span.log_kv({"foo": "bar"}) self.assertEqual(span.unwrap().events[0].attributes["foo"], "bar") # Verify timestamp was generated automatically. self.assertIsNotNone(span.unwrap().events[0].timestamp) # Test explicit timestamp. now = time.time() span.log_kv({"foo": "bar"}, now) result = util.time_seconds_from_ns( span.unwrap().events[1].timestamp ) self.assertEqual(span.unwrap().events[1].attributes["foo"], "bar") # Tolerate inaccuracies of less than a microsecond. See Note: # https://open-telemetry.github.io/opentelemetry-python/shim/opentracing_shim/opentracing_shim.html # TODO: This seems to work consistently, but we should find out the # biggest possible loss of precision. 
self.assertAlmostEqual(result, now, places=6) def test_log(self): """Test the deprecated `log` method on `Span` objects.""" with self.shim.start_span("TestSpan13") as span: with self.assertWarns(DeprecationWarning): span.log(event="foo", payload="bar") self.assertEqual(span.unwrap().events[0].attributes["event"], "foo") self.assertEqual(span.unwrap().events[0].attributes["payload"], "bar") self.assertIsNotNone(span.unwrap().events[0].timestamp) def test_log_event(self): """Test the deprecated `log_event` method on `Span` objects.""" with self.shim.start_span("TestSpan14") as span: with self.assertWarns(DeprecationWarning): span.log_event("foo", "bar") self.assertEqual(span.unwrap().events[0].attributes["event"], "foo") self.assertEqual(span.unwrap().events[0].attributes["payload"], "bar") self.assertIsNotNone(span.unwrap().events[0].timestamp) def test_span_context(self): """Test construction of `SpanContextShim` objects.""" otel_context = trace.SpanContext(1234, 5678, is_remote=False) context = SpanContextShim(otel_context) self.assertIsInstance(context, opentracing.SpanContext) self.assertEqual(context.unwrap().trace_id, 1234) self.assertEqual(context.unwrap().span_id, 5678) def test_span_on_error(self): """Verify an error tag and logs are created on the span when an exception is raised. """ # Raise an exception while a span is active. with self.assertRaises(Exception) as exc_ctx: with self.shim.start_active_span("TestName") as scope: # pylint: disable=broad-exception-raised raise Exception("bad thing") ex = exc_ctx.exception expected_stack = "".join( traceback.format_exception(type(ex), value=ex, tb=ex.__traceback__) ) # Verify exception details have been added to the span. exc_event = scope.span.unwrap().events[0] self.assertEqual(exc_event.name, "exception") self.assertEqual( exc_event.attributes["exception.message"], "bad thing" ) self.assertEqual( exc_event.attributes["exception.type"], Exception.__name__ ) # We cannot get the whole stacktrace, so just assert that the # exception part is contained. self.assertIn( expected_stack, exc_event.attributes["exception.stacktrace"] ) def test_inject_http_headers(self): """Test `inject()` method for Format.HTTP_HEADERS.""" otel_context = trace.SpanContext( trace_id=1220, span_id=7478, is_remote=False ) context = SpanContextShim(otel_context) headers = {} self.shim.inject(context, opentracing.Format.HTTP_HEADERS, headers) self.assertEqual( headers[MockTextMapPropagator.TRACE_ID_KEY], str(1220) ) self.assertEqual(headers[MockTextMapPropagator.SPAN_ID_KEY], str(7478)) def test_inject_text_map(self): """Test `inject()` method for Format.TEXT_MAP.""" otel_context = trace.SpanContext( trace_id=1220, span_id=7478, is_remote=False ) context = SpanContextShim(otel_context) # Verify Format.TEXT_MAP text_map = {} self.shim.inject(context, opentracing.Format.TEXT_MAP, text_map) self.assertEqual( text_map[MockTextMapPropagator.TRACE_ID_KEY], str(1220) ) self.assertEqual( text_map[MockTextMapPropagator.SPAN_ID_KEY], str(7478) ) def test_inject_binary(self): """Test `inject()` method for Format.BINARY.""" otel_context = trace.SpanContext( trace_id=1220, span_id=7478, is_remote=False ) context = SpanContextShim(otel_context) # Verify the exception raised for the unsupported binary format.
with self.assertRaises(opentracing.UnsupportedFormatException): self.shim.inject(context, opentracing.Format.BINARY, bytearray()) def test_extract_http_headers(self): """Test `extract()` method for Format.HTTP_HEADERS.""" carrier = { MockTextMapPropagator.TRACE_ID_KEY: 1220, MockTextMapPropagator.SPAN_ID_KEY: 7478, } ctx = self.shim.extract(opentracing.Format.HTTP_HEADERS, carrier) self.assertEqual(ctx.unwrap().trace_id, 1220) self.assertEqual(ctx.unwrap().span_id, 7478) def test_extract_empty_context_returns_invalid_context(self): """In the case where the propagator cannot extract a SpanContext, extract should return an invalid span context. """ _old_propagator = get_global_textmap() set_global_textmap(NOOPTextMapPropagator()) try: carrier = {} ctx = self.shim.extract(opentracing.Format.HTTP_HEADERS, carrier) self.assertEqual(ctx.unwrap(), trace.INVALID_SPAN_CONTEXT) finally: set_global_textmap(_old_propagator) def test_extract_text_map(self): """Test `extract()` method for Format.TEXT_MAP.""" carrier = { MockTextMapPropagator.TRACE_ID_KEY: 1220, MockTextMapPropagator.SPAN_ID_KEY: 7478, } ctx = self.shim.extract(opentracing.Format.TEXT_MAP, carrier) self.assertEqual(ctx.unwrap().trace_id, 1220) self.assertEqual(ctx.unwrap().span_id, 7478) def test_extract_binary(self): """Test `extract()` method for Format.BINARY.""" # Verify the exception raised for the unsupported binary format. with self.assertRaises(opentracing.UnsupportedFormatException): self.shim.extract(opentracing.Format.BINARY, bytearray()) def test_baggage(self): span_context_shim = SpanContextShim( trace.SpanContext(1234, 5678, is_remote=False) ) baggage = span_context_shim.baggage with self.assertRaises(ValueError): baggage[1] = 3 span_shim = SpanShim(Mock(), span_context_shim, Mock()) span_shim.set_baggage_item(1, 2) self.assertEqual(span_shim.get_baggage_item(1), 2) def test_active(self): """Test that the active property and start_active_span return the same object""" # Verify no span is currently active. self.assertIsNone(self.shim.active_span) with self.shim.start_active_span("TestSpan15") as scope: # Verify span is active. self.assertEqual( self.shim.active_span.context.unwrap(), scope.span.context.unwrap(), ) self.assertIs(self.shim.scope_manager.active, scope) # Verify no span is active. self.assertIsNone(self.shim.active_span) def test_mixed_mode(self): """Test that span parent-child relationship is kept between OpenTelemetry and the OpenTracing shim""" span_shim = self.shim.start_span("TestSpan16") with self.shim.scope_manager.activate(span_shim, finish_on_close=True): with ( TracerProvider() .get_tracer(__name__) .start_as_current_span("abc") ) as opentelemetry_span: self.assertIs( span_shim.unwrap().context, opentelemetry_span.parent, ) with ( TracerProvider().get_tracer(__name__).start_as_current_span("abc") ) as opentelemetry_span: with self.shim.start_active_span("TestSpan17") as scope: self.assertIs( scope.span.unwrap().parent, opentelemetry_span.context, ) python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/test_util.py000066400000000000000000000045601511654350100306570ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from time import time, time_ns from unittest import TestCase from opentelemetry.shim.opentracing_shim.util import ( DEFAULT_EVENT_NAME, event_name_from_kv, time_seconds_from_ns, time_seconds_to_ns, ) class TestUtil(TestCase): def test_event_name_from_kv(self): # Test basic behavior. event_name = "send HTTP request" res = event_name_from_kv({"event": event_name, "foo": "bar"}) self.assertEqual(res, event_name) # Test None. res = event_name_from_kv(None) self.assertEqual(res, DEFAULT_EVENT_NAME) # Test empty dict. res = event_name_from_kv({}) self.assertEqual(res, DEFAULT_EVENT_NAME) # Test missing `event` field. res = event_name_from_kv({"foo": "bar"}) self.assertEqual(res, DEFAULT_EVENT_NAME) def test_time_seconds_to_ns(self): time_seconds = time() result = time_seconds_to_ns(time_seconds) self.assertEqual(result, int(time_seconds * 1e9)) def test_time_seconds_from_ns(self): time_nanoseconds = time_ns() result = time_seconds_from_ns(time_nanoseconds) self.assertEqual(result, time_nanoseconds / 1e9) def test_time_conversion_precision(self): """Verify time conversion from seconds to nanoseconds and vice versa is accurate enough. """ time_seconds = 1570484241.9501917 time_nanoseconds = time_seconds_to_ns(time_seconds) result = time_seconds_from_ns(time_nanoseconds) # Tolerate inaccuracies of less than a microsecond. # TODO: Put a link to an explanation in the docs. # TODO: This seems to work consistently, but we should find out the # biggest possible loss of precision. self.assertAlmostEqual(result, time_seconds, places=6) python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/000077500000000000000000000000001511654350100277165ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/README.rst000066400000000000000000000030531511654350100314060ustar00rootroot00000000000000 Testbed suite for the OpenTelemetry-OpenTracing Bridge ====================================================== Testbed suite designed to test the API changes. Build and test. --------------- .. code-block:: sh tox -e py311-test-opentracing-shim Alternatively, due to the organization of the suite, it's possible to run the tests directly using ``py.test``\ : .. code-block:: sh py.test -s testbed/test_multiple_callbacks/test_threads.py Tested frameworks ----------------- Currently the examples cover ``threading`` and ``asyncio``. List of patterns ---------------- * `Active Span replacement `_ - Start an isolated task and query for its results in another task/thread. * `Client-Server `_ - Typical client-server example. * `Common Request Handler `_ - One request handler for all requests. * `Late Span finish `_ - Late parent ``Span`` finish. * `Multiple callbacks `_ - Multiple callbacks spawned at the same time. * `Nested callbacks `_ - One callback at a time, defined in a pipeline fashion. * `Subtask Span propagation `_ - ``Span`` propagation for subtasks/coroutines.
Adding new patterns ------------------- A new pattern is composed of a directory under *testbed* with the *test_* prefix, and containing the files for each platform, also with the *test_* prefix: .. code-block:: testbed/ test_new_pattern/ test_threads.py test_asyncio.py python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/__init__.py000066400000000000000000000000001511654350100320150ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/otel_ot_shim_tracer.py000066400000000000000000000016561511654350100343250ustar00rootroot00000000000000import opentelemetry.shim.opentracing_shim as opentracingshim from opentelemetry.sdk import trace from opentelemetry.sdk.trace.export import SimpleSpanProcessor from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( InMemorySpanExporter, ) class MockTracer(opentracingshim.TracerShim): """Wrapper of `opentracingshim.TracerShim`. MockTracer extends `opentracingshim.TracerShim` by adding an in-memory span exporter that can be used to get the list of finished spans.""" def __init__(self): tracer_provider = trace.TracerProvider() oteltracer = tracer_provider.get_tracer(__name__) super().__init__(oteltracer) exporter = InMemorySpanExporter() span_processor = SimpleSpanProcessor(exporter) tracer_provider.add_span_processor(span_processor) self.exporter = exporter def finished_spans(self): return self.exporter.get_finished_spans() test_active_span_replacement/000077500000000000000000000000001511654350100355515ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbedREADME.rst000066400000000000000000000012471511654350100372440ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_active_span_replacement Active Span replacement example. ================================ This example shows a ``Span`` being created and then passed to an asynchronous task, which will temporarily activate it to finish its processing, and then restore the previously active ``Span``. ``threading`` implementation: .. code-block:: python # Create a new Span for this task with self.tracer.start_active_span("task"): with self.tracer.scope_manager.activate(span, True): # Simulate work strictly related to the initial Span pass # Use the task span as parent of a new subtask with self.tracer.start_active_span("subtask"): pass __init__.py000066400000000000000000000000001511654350100376500ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_active_span_replacementtest_asyncio.py000066400000000000000000000044641511654350100406370ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_active_span_replacement# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
import asyncio # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import stop_loop_when class TestAsyncio(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.loop = asyncio.get_event_loop() def test_main(self): # Start an isolated task and query for its result -and finish it- # in another task/thread span = self.tracer.start_span("initial") self.submit_another_task(span) stop_loop_when( self.loop, lambda: len(self.tracer.finished_spans()) >= 3, timeout=5.0, ) self.loop.run_forever() spans = self.tracer.finished_spans() self.assertEqual(len(spans), 3) self.assertNamesEqual(spans, ["initial", "subtask", "task"]) # task/subtask are part of the same trace, # and subtask is a child of task self.assertSameTrace(spans[1], spans[2]) self.assertIsChildOf(spans[1], spans[2]) # initial task is not related in any way to those two tasks self.assertNotSameTrace(spans[0], spans[1]) self.assertEqual(spans[0].parent, None) async def task(self, span): # Create a new Span for this task with self.tracer.start_active_span("task"): with self.tracer.scope_manager.activate(span, True): # Simulate work strictly related to the initial Span pass # Use the task span as parent of a new subtask with self.tracer.start_active_span("subtask"): pass def submit_another_task(self, span): self.loop.create_task(self.task(span)) test_threads.py000066400000000000000000000044611511654350100406210ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_active_span_replacement# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from concurrent.futures import ThreadPoolExecutor # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase class TestThreads(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() # use max_workers=3 as a general example even if only one would suffice self.executor = ThreadPoolExecutor(max_workers=3) def test_main(self): # Start an isolated task and query for its result -and finish it- # in another task/thread span = self.tracer.start_span("initial") self.submit_another_task(span) self.executor.shutdown(True) spans = self.tracer.finished_spans() self.assertEqual(len(spans), 3) self.assertNamesEqual(spans, ["initial", "subtask", "task"]) # task/subtask are part of the same trace, # and subtask is a child of task self.assertSameTrace(spans[1], spans[2]) self.assertIsChildOf(spans[1], spans[2]) # initial task is not related in any way to those two tasks self.assertNotSameTrace(spans[0], spans[1]) self.assertEqual(spans[0].parent, None) self.assertEqual(spans[2].parent, None) def task(self, span): # Create a new Span for this task with self.tracer.start_active_span("task"): with self.tracer.scope_manager.activate(span, True): # Simulate work strictly related to the initial Span pass # Use the task span as parent of a new subtask with self.tracer.start_active_span("subtask"): pass def submit_another_task(self, span): self.executor.submit(self.task, span) python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_client_server/000077500000000000000000000000001511654350100336215ustar00rootroot00000000000000README.rst000066400000000000000000000015061511654350100352330ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_client_server Client-Server example. ====================== This example shows a ``Span`` created by a ``Client``, which will send a ``Message`` / ``SpanContext`` to a ``Server``, which will in turn extract such context and use it as parent of a new (server-side) ``Span``. ``Client.send()`` is used to send messages and inject the ``SpanContext`` using the ``TEXT_MAP`` format, and ``Server.process()`` will process received messages and will extract the context used as parent. .. code-block:: python def send(self): with self.tracer.start_active_span("send") as scope: scope.span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) message = {} self.tracer.inject(scope.span.context, opentracing.Format.TEXT_MAP, message) self.queue.put(message) __init__.py000066400000000000000000000000001511654350100356410ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_client_servertest_asyncio.py000066400000000000000000000055371511654350100366320ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_client_server# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import asyncio import opentracing from opentracing.ext import tags # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import get_logger, get_one_by_tag, stop_loop_when logger = get_logger(__name__) class Server: def __init__(self, *args, **kwargs): tracer = kwargs.pop("tracer") queue = kwargs.pop("queue") super().__init__(*args, **kwargs) self.tracer = tracer self.queue = queue async def run(self): value = await self.queue.get() self.process(value) def process(self, message): logger.info("Processing message in server") ctx = self.tracer.extract(opentracing.Format.TEXT_MAP, message) with self.tracer.start_active_span("receive", child_of=ctx) as scope: scope.span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_SERVER) class Client: def __init__(self, tracer, queue): self.tracer = tracer self.queue = queue async def send(self): with self.tracer.start_active_span("send") as scope: scope.span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) message = {} self.tracer.inject( scope.span.context, opentracing.Format.TEXT_MAP, message ) await self.queue.put(message) logger.info("Sent message from client") class TestAsyncio(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.queue = asyncio.Queue() self.loop = asyncio.get_event_loop() self.server = Server(tracer=self.tracer, queue=self.queue) def test(self): client = Client(self.tracer, self.queue) self.loop.create_task(self.server.run()) self.loop.create_task(client.send()) stop_loop_when( self.loop, lambda: len(self.tracer.finished_spans()) >= 2, timeout=5.0, ) self.loop.run_forever() spans = self.tracer.finished_spans() self.assertIsNotNone( get_one_by_tag(spans, tags.SPAN_KIND, tags.SPAN_KIND_RPC_SERVER) ) self.assertIsNotNone( get_one_by_tag(spans, tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) ) test_threads.py000066400000000000000000000053021511654350100366050ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_client_server# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from queue import Queue from threading import Thread import opentracing from opentracing.ext import tags # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import await_until, get_logger, get_one_by_tag logger = get_logger(__name__) class Server(Thread): def __init__(self, *args, **kwargs): tracer = kwargs.pop("tracer") queue = kwargs.pop("queue") super().__init__(*args, **kwargs) self.daemon = True self.tracer = tracer self.queue = queue def run(self): value = self.queue.get() self.process(value) def process(self, message): logger.info("Processing message in server") ctx = self.tracer.extract(opentracing.Format.TEXT_MAP, message) with self.tracer.start_active_span("receive", child_of=ctx) as scope: scope.span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_SERVER) class Client: def __init__(self, tracer, queue): self.tracer = tracer self.queue = queue def send(self): with self.tracer.start_active_span("send") as scope: scope.span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) message = {} self.tracer.inject( scope.span.context, opentracing.Format.TEXT_MAP, message ) self.queue.put(message) logger.info("Sent message from client") class TestThreads(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.queue = Queue() self.server = Server(tracer=self.tracer, queue=self.queue) self.server.start() def test(self): client = Client(self.tracer, self.queue) client.send() await_until(lambda: len(self.tracer.finished_spans()) >= 2) spans = self.tracer.finished_spans() self.assertIsNotNone( get_one_by_tag(spans, tags.SPAN_KIND, tags.SPAN_KIND_RPC_SERVER) ) self.assertIsNotNone( get_one_by_tag(spans, tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) ) test_common_request_handler/000077500000000000000000000000001511654350100354335ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbedREADME.rst000066400000000000000000000016271511654350100371300ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler Common Request Handler example. =============================== This example shows a ``Span`` used with ``RequestHandler``, which is used as a middleware (as in web frameworks) to manage a new ``Span`` per operation through its ``before_request()`` / ``after_request()`` methods. Implementation details: * For ``threading``, no active ``Span`` is consumed as the tasks may be run concurrently on different threads, and an explicit ``SpanContext`` has to be saved to be used as parent. RequestHandler implementation: .. code-block:: python def before_request(self, request, request_context): # If we should ignore the active Span, use any passed SpanContext # as the parent. Else, use the active one. span = self.tracer.start_span("send", child_of=self.context, ignore_active_span=True) __init__.py000066400000000000000000000000001511654350100375320ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handlerrequest_handler.py000066400000000000000000000033001511654350100411660ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentracing.ext import tags # pylint: disable=import-error from ..utils import get_logger logger = get_logger(__name__) class RequestHandler: def __init__(self, tracer, context=None, ignore_active_span=True): self.tracer = tracer self.context = context self.ignore_active_span = ignore_active_span def before_request(self, request, request_context): logger.info("Before request %s", request) # If we should ignore the active Span, use any passed SpanContext # as the parent. Else, use the active one. if self.ignore_active_span: span = self.tracer.start_span( "send", child_of=self.context, ignore_active_span=True ) else: span = self.tracer.start_span("send") span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) request_context["span"] = span def after_request(self, request, request_context): # pylint: disable=no-self-use logger.info("After request %s", request) span = request_context.get("span") if span is not None: span.finish() test_asyncio.py000066400000000000000000000121701511654350100405120ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio from opentracing.ext import tags # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import get_logger, get_one_by_operation_name, stop_loop_when from .request_handler import RequestHandler logger = get_logger(__name__) class Client: def __init__(self, request_handler, loop): self.request_handler = request_handler self.loop = loop async def send_task(self, message): request_context = {} async def before_handler(): self.request_handler.before_request(message, request_context) async def after_handler(): self.request_handler.after_request(message, request_context) await before_handler() await after_handler() return f"{message}::response" def send(self, message): return self.send_task(message) def send_sync(self, message): return self.loop.run_until_complete(self.send_task(message)) class TestAsyncio(OpenTelemetryTestCase): """ There is only one instance of 'RequestHandler' per 'Client'. Methods of 'RequestHandler' are executed in different Tasks, and no Span propagation among them is done automatically. Therefore we cannot use the currently active span or activate one. So one issue here is setting the correct parent span.
""" def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.loop = asyncio.get_event_loop() self.client = Client(RequestHandler(self.tracer), self.loop) def test_two_callbacks(self): res_future1 = self.loop.create_task(self.client.send("message1")) res_future2 = self.loop.create_task(self.client.send("message2")) stop_loop_when( self.loop, lambda: len(self.tracer.finished_spans()) >= 2, timeout=5.0, ) self.loop.run_forever() self.assertEqual("message1::response", res_future1.result()) self.assertEqual("message2::response", res_future2.result()) spans = self.tracer.finished_spans() self.assertEqual(len(spans), 2) for span in spans: self.assertEqual( span.attributes.get(tags.SPAN_KIND, None), tags.SPAN_KIND_RPC_CLIENT, ) self.assertNotSameTrace(spans[0], spans[1]) self.assertIsNone(spans[0].parent) self.assertIsNone(spans[1].parent) def test_parent_not_picked(self): """Active parent should not be picked up by child.""" async def do_task(): with self.tracer.start_active_span("parent"): response = await self.client.send_task("no_parent") self.assertEqual("no_parent::response", response) self.loop.run_until_complete(do_task()) spans = self.tracer.finished_spans() self.assertEqual(len(spans), 2) child_span = get_one_by_operation_name(spans, "send") self.assertIsNotNone(child_span) parent_span = get_one_by_operation_name(spans, "parent") self.assertIsNotNone(parent_span) # Here check that there is no parent-child relation. self.assertIsNotChildOf(child_span, parent_span) def test_good_solution_to_set_parent(self): """Asyncio and contextvars are integrated, in this case it is not needed to activate current span by hand. """ async def do_task(): with self.tracer.start_active_span("parent"): # Set ignore_active_span to False indicating that the # framework will do it for us. req_handler = RequestHandler( self.tracer, ignore_active_span=False, ) client = Client(req_handler, self.loop) response = await client.send_task("correct_parent") self.assertEqual("correct_parent::response", response) # Send second request, now there is no active parent, # but it will be set, ups response = await client.send_task("wrong_parent") self.assertEqual("wrong_parent::response", response) self.loop.run_until_complete(do_task()) spans = self.tracer.finished_spans() self.assertEqual(len(spans), 3) parent_span = get_one_by_operation_name(spans, "parent") self.assertIsNotNone(parent_span) spans = [span for span in spans if span != parent_span] self.assertIsChildOf(spans[0], parent_span) self.assertIsNotChildOf(spans[1], parent_span) test_threads.py000066400000000000000000000113221511654350100404750ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_common_request_handler# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from concurrent.futures import ThreadPoolExecutor from opentracing.ext import tags # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import get_logger, get_one_by_operation_name from .request_handler import RequestHandler logger = get_logger(__name__) class Client: def __init__(self, request_handler, executor): self.request_handler = request_handler self.executor = executor def send_task(self, message): request_context = {} def before_handler(): self.request_handler.before_request(message, request_context) def after_handler(): self.request_handler.after_request(message, request_context) self.executor.submit(before_handler).result() self.executor.submit(after_handler).result() return f"{message}::response" def send(self, message): return self.executor.submit(self.send_task, message) def send_sync(self, message, timeout=5.0): fut = self.executor.submit(self.send_task, message) return fut.result(timeout=timeout) class TestThreads(OpenTelemetryTestCase): """ There is only one instance of 'RequestHandler' per 'Client'. Methods of 'RequestHandler' are executed concurrently in different threads which are reused (executor). Therefore we cannot simply use the currently active span and activate new spans under it, so one issue here is setting the correct parent span. """ def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.executor = ThreadPoolExecutor(max_workers=3) self.client = Client(RequestHandler(self.tracer), self.executor) def test_two_callbacks(self): response_future1 = self.client.send("message1") response_future2 = self.client.send("message2") self.assertEqual("message1::response", response_future1.result(5.0)) self.assertEqual("message2::response", response_future2.result(5.0)) spans = self.tracer.finished_spans() self.assertEqual(len(spans), 2) for span in spans: self.assertEqual( span.attributes.get(tags.SPAN_KIND, None), tags.SPAN_KIND_RPC_CLIENT, ) self.assertNotSameTrace(spans[0], spans[1]) self.assertIsNone(spans[0].parent) self.assertIsNone(spans[1].parent) def test_parent_not_picked(self): """Active parent should not be picked up by child.""" with self.tracer.start_active_span("parent"): response = self.client.send_sync("no_parent") self.assertEqual("no_parent::response", response) spans = self.tracer.finished_spans() self.assertEqual(len(spans), 2) child_span = get_one_by_operation_name(spans, "send") self.assertIsNotNone(child_span) parent_span = get_one_by_operation_name(spans, "parent") self.assertIsNotNone(parent_span) # Here check that there is no parent-child relation. self.assertIsNotChildOf(child_span, parent_span) def test_bad_solution_to_set_parent(self): """This solution is bad because the parent is set per client and is not automatically activated depending on the context. """ with self.tracer.start_active_span("parent") as scope: client = Client( # Pass a span context to be used as the parent. 
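                # Because the parent is captured once per client, every
                # request sent through this client (including the second,
                # unrelated one below) ends up under this same parent.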
RequestHandler(self.tracer, scope.span.context), self.executor, ) response = client.send_sync("correct_parent") self.assertEqual("correct_parent::response", response) response = client.send_sync("wrong_parent") self.assertEqual("wrong_parent::response", response) spans = self.tracer.finished_spans() self.assertEqual(len(spans), 3) spans = sorted(spans, key=lambda x: x.start_time) parent_span = get_one_by_operation_name(spans, "parent") self.assertIsNotNone(parent_span) spans = [s for s in spans if s != parent_span] self.assertEqual(len(spans), 2) for span in spans: self.assertIsChildOf(span, parent_span) python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_late_span_finish/000077500000000000000000000000001511654350100342635ustar00rootroot00000000000000README.rst000066400000000000000000000013621511654350100356750ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_late_span_finish Late Span finish example. ========================= This example shows a ``Span`` for a top-level operation, with independent, unknown lifetime, acting as parent of a few asynchronous subtasks (which must re-activate it but not finish it). .. code-block:: python # Fire away a few subtasks, passing a parent Span whose lifetime # is not tied at all to the children. def submit_subtasks(self, parent_span): def task(name, interval): with self.tracer.scope_manager.activate(parent_span, False): with self.tracer.start_active_span(name): time.sleep(interval) self.executor.submit(task, "task1", 0.1) self.executor.submit(task, "task2", 0.3) __init__.py000066400000000000000000000000001511654350100363030ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_late_span_finishtest_asyncio.py000066400000000000000000000043621511654350100372670ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_late_span_finish# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import get_logger, stop_loop_when logger = get_logger(__name__) class TestAsyncio(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.loop = asyncio.get_event_loop() def test_main(self): # Create a Span and use it as (explicit) parent of a pair of subtasks. parent_span = self.tracer.start_span("parent") self.submit_subtasks(parent_span) stop_loop_when( self.loop, lambda: len(self.tracer.finished_spans()) >= 2, timeout=5.0, ) self.loop.run_forever() # Late-finish the parent Span now. 
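        # (Both subtasks have already re-activated and closed their scopes,
        # so the parent is the last of the three spans to end.)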
parent_span.finish() spans = self.tracer.finished_spans() self.assertEqual(len(spans), 3) self.assertNamesEqual(spans, ["task1", "task2", "parent"]) for idx in range(2): self.assertSameTrace(spans[idx], spans[-1]) self.assertIsChildOf(spans[idx], spans[-1]) self.assertTrue(spans[idx].end_time <= spans[-1].end_time) # Fire away a few subtasks, passing a parent Span whose lifetime # is not tied at all to the children. def submit_subtasks(self, parent_span): async def task(name): logger.info("Running %s", name) with self.tracer.scope_manager.activate(parent_span, False): with self.tracer.start_active_span(name): await asyncio.sleep(0.1) self.loop.create_task(task("task1")) self.loop.create_task(task("task2")) test_threads.py000066400000000000000000000041411511654350100372470ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_late_span_finish# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time from concurrent.futures import ThreadPoolExecutor # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase class TestThreads(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.executor = ThreadPoolExecutor(max_workers=3) def test_main(self): # Create a Span and use it as (explicit) parent of a pair of subtasks. parent_span = self.tracer.start_span("parent") self.submit_subtasks(parent_span) # Wait for the threadpool to be done. self.executor.shutdown(True) # Late-finish the parent Span now. parent_span.finish() spans = self.tracer.finished_spans() self.assertEqual(len(spans), 3) self.assertNamesEqual(spans, ["task1", "task2", "parent"]) for idx in range(2): self.assertSameTrace(spans[idx], spans[-1]) self.assertIsChildOf(spans[idx], spans[-1]) self.assertTrue(spans[idx].end_time <= spans[-1].end_time) # Fire away a few subtasks, passing a parent Span whose lifetime # is not tied at all to the children. def submit_subtasks(self, parent_span): def task(name, interval): with self.tracer.scope_manager.activate(parent_span, False): with self.tracer.start_active_span(name): time.sleep(interval) self.executor.submit(task, "task1", 0.1) self.executor.submit(task, "task2", 0.3) test_listener_per_request/000077500000000000000000000000001511654350100351415ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbedREADME.rst000066400000000000000000000013221511654350100366260ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_listener_per_request Listener Response example. ========================== This example shows a ``Span`` created upon a message being sent to a ``Client``, and its handling by a related, **not shared** ``ResponseListener`` object with an ``on_response(self, response)`` method that finishes it. .. 
code-block:: python def _task(self, message, listener): res = "%s::response" % message listener.on_response(res) return res def send_sync(self, message): span = self.tracer.start_span("send") span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) listener = ResponseListener(span) return self.executor.submit(self._task, message, listener).result() __init__.py000066400000000000000000000000001511654350100372400ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_listener_per_requestresponse_listener.py000066400000000000000000000002331511654350100412540ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_listener_per_requestclass ResponseListener: def __init__(self, span): self.span = span def on_response(self, res): del res self.span.finish() test_asyncio.py000066400000000000000000000035111511654350100402170ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_listener_per_request# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio from opentracing.ext import tags # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import get_one_by_tag from .response_listener import ResponseListener async def task(message, listener): res = f"{message}::response" listener.on_response(res) return res class Client: def __init__(self, tracer, loop): self.tracer = tracer self.loop = loop def send_sync(self, message): span = self.tracer.start_span("send") span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) listener = ResponseListener(span) return self.loop.run_until_complete(task(message, listener)) class TestAsyncio(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.loop = asyncio.get_event_loop() def test_main(self): client = Client(self.tracer, self.loop) res = client.send_sync("message") self.assertEqual(res, "message::response") spans = self.tracer.finished_spans() self.assertEqual(len(spans), 1) span = get_one_by_tag(spans, tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) self.assertIsNotNone(span) test_threads.py000066400000000000000000000036141511654350100402100ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_listener_per_request# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
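# Note: thread-based counterpart of test_asyncio.py above. The span is
# finished by the per-request ResponseListener once the worker thread has
# produced the response, so no scope activation is involved at all.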
from concurrent.futures import ThreadPoolExecutor from opentracing.ext import tags # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import get_one_by_tag from .response_listener import ResponseListener class Client: def __init__(self, tracer): self.tracer = tracer self.executor = ThreadPoolExecutor(max_workers=3) def _task(self, message, listener): # pylint: disable=no-self-use res = f"{message}::response" listener.on_response(res) return res def send_sync(self, message): span = self.tracer.start_span("send") span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) listener = ResponseListener(span) return self.executor.submit(self._task, message, listener).result() class TestThreads(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() def test_main(self): client = Client(self.tracer) res = client.send_sync("message") self.assertEqual(res, "message::response") spans = self.tracer.finished_spans() self.assertEqual(len(spans), 1) span = get_one_by_tag(spans, tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT) self.assertIsNotNone(span) test_multiple_callbacks/000077500000000000000000000000001511654350100345305ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbedREADME.rst000066400000000000000000000032721511654350100362230ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_multiple_callbacks Multiple callbacks example. =========================== This example shows a ``Span`` created for a top-level operation, covering a set of asynchronous operations (representing callbacks), with the ``Span`` finished only when **all** of them have been executed. ``Client.send()`` is used to create a new asynchronous operation (callback), and in turn every operation both restores the active ``Span`` and creates a child ``Span`` (useful for measuring the performance of each callback). Implementation details: * For ``threading``, a thread-safe counter is put in each ``Span`` to keep track of the pending callbacks, calling ``Span.finish()`` when the count becomes 0. * For ``asyncio``, the child coroutines representing the subtasks are simply yielded over, so no counter is needed. ``threading`` implementation: .. code-block:: python def task(self, interval, parent_span): logger.info("Starting task") try: scope = self.tracer.scope_manager.activate(parent_span, False) with self.tracer.start_active_span("task"): time.sleep(interval) finally: scope.close() if parent_span.ref_count.decr() == 0: parent_span.finish() ``asyncio`` implementation: .. code-block:: python async def task(self, interval, parent_span): logger.info("Starting task") with self.tracer.start_active_span("task"): await asyncio.sleep(interval) # Invoke and yield over the coroutines. 
with self.tracer.start_active_span("parent"): tasks = self.submit_callbacks() await asyncio.gather(*tasks) __init__.py000066400000000000000000000000001511654350100366270ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_multiple_callbackstest_asyncio.py000066400000000000000000000045641511654350100376170ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_multiple_callbacks# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import random # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import get_logger, stop_loop_when random.seed() logger = get_logger(__name__) class TestAsyncio(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.loop = asyncio.get_event_loop() def test_main(self): # Need to run within a Task, as the scope manager depends # on Task.current_task() async def main_task(): with self.tracer.start_active_span("parent"): tasks = self.submit_callbacks() await asyncio.gather(*tasks) self.loop.create_task(main_task()) stop_loop_when( self.loop, lambda: len(self.tracer.finished_spans()) >= 4, timeout=5.0, ) self.loop.run_forever() spans = self.tracer.finished_spans() self.assertEqual(len(spans), 4) self.assertNamesEqual(spans, ["task", "task", "task", "parent"]) for idx in range(3): self.assertSameTrace(spans[idx], spans[-1]) self.assertIsChildOf(spans[idx], spans[-1]) async def task(self, interval, parent_span): logger.info("Starting task") with self.tracer.scope_manager.activate(parent_span, False): with self.tracer.start_active_span("task"): await asyncio.sleep(interval) def submit_callbacks(self): parent_span = self.tracer.scope_manager.active.span tasks = [] for _ in range(3): interval = 0.1 + random.randint(200, 500) * 0.001 task = self.loop.create_task(self.task(interval, parent_span)) tasks.append(task) return tasks test_threads.py000066400000000000000000000045721511654350100376030ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_multiple_callbacks# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
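# Note: unlike the asyncio variant above, which simply gathers its child
# coroutines, this thread-based variant cannot await its callbacks. A minimal
# sketch of the counting pattern used below (see utils.RefCount):
#
#   span.ref_count = RefCount(1)      # the owner holds one reference
#   ...                               # each callback incr()s before starting
#   if span.ref_count.decr() == 0:
#       span.finish()                 # the last holder finishes the span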
import random import time from concurrent.futures import ThreadPoolExecutor # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import RefCount, get_logger random.seed() logger = get_logger(__name__) class TestThreads(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.executor = ThreadPoolExecutor(max_workers=3) def test_main(self): try: scope = self.tracer.start_active_span( "parent", finish_on_close=False ) scope.span.ref_count = RefCount(1) self.submit_callbacks(scope.span) finally: scope.close() if scope.span.ref_count.decr() == 0: scope.span.finish() self.executor.shutdown(True) spans = self.tracer.finished_spans() self.assertEqual(len(spans), 4) self.assertNamesEqual(spans, ["task", "task", "task", "parent"]) for idx in range(3): self.assertSameTrace(spans[idx], spans[-1]) self.assertIsChildOf(spans[idx], spans[-1]) def task(self, interval, parent_span): logger.info("Starting task") scope = None try: scope = self.tracer.scope_manager.activate(parent_span, False) with self.tracer.start_active_span("task"): time.sleep(interval) finally: scope.close() if parent_span.ref_count.decr() == 0: parent_span.finish() def submit_callbacks(self, parent_span): for _ in range(3): parent_span.ref_count.incr() self.executor.submit( self.task, 0.1 + random.randint(200, 500) * 0.001, parent_span ) python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_nested_callbacks/000077500000000000000000000000001511654350100342365ustar00rootroot00000000000000README.rst000066400000000000000000000027311511654350100356510ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_nested_callbacks Nested callbacks example. ========================= This example shows a ``Span`` for a top-level operation, and how it can be passed down a list of nested callbacks (always one at a time), kept as the active one for each of them, and finished **only** when the last one executes. For Python, we have decided to do it in a **fire-and-forget** fashion. Implementation details: * For ``threading``, the ``Span`` is manually activated in each task. * For ``asyncio``, the active ``Span`` is not activated down the chain as the ``Context`` automatically propagates it. ``threading`` implementation: .. code-block:: python def submit(self): span = self.tracer.scope_manager.active.span def task1(): with self.tracer.scope_manager.activate(span, False): span.set_tag("key1", "1") def task2(): with self.tracer.scope_manager.activate(span, False): span.set_tag("key2", "2") ... ``asyncio`` implementation: .. code-block:: python async def task1(): span.set_tag("key1", "1") async def task2(): span.set_tag("key2", "2") async def task3(): span.set_tag("key3", "3") span.finish() self.loop.create_task(task3()) self.loop.create_task(task2()) self.loop.create_task(task1()) __init__.py000066400000000000000000000000001511654350100362560ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_nested_callbackstest_asyncio.py000066400000000000000000000041371511654350100372420ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_nested_callbacks# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import stop_loop_when class TestAsyncio(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.loop = asyncio.get_event_loop() def test_main(self): # Start a Span and let the callback-chain # finish it when the task is done async def task(): with self.tracer.start_active_span("one", finish_on_close=False): self.submit() self.loop.create_task(task()) stop_loop_when( self.loop, lambda: len(self.tracer.finished_spans()) == 1, timeout=5.0, ) self.loop.run_forever() spans = self.tracer.finished_spans() self.assertEqual(len(spans), 1) self.assertEqual(spans[0].name, "one") for idx in range(1, 4): self.assertEqual( spans[0].attributes.get(f"key{idx}", None), str(idx) ) def submit(self): span = self.tracer.scope_manager.active.span async def task1(): span.set_tag("key1", "1") async def task2(): span.set_tag("key2", "2") async def task3(): span.set_tag("key3", "3") span.finish() self.loop.create_task(task3()) self.loop.create_task(task2()) self.loop.create_task(task1()) test_threads.py000066400000000000000000000047731511654350100372350ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_nested_callbacks# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from concurrent.futures import ThreadPoolExecutor # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase from ..utils import await_until class TestThreads(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.executor = ThreadPoolExecutor(max_workers=3) def tearDown(self): # pylint: disable=invalid-name self.executor.shutdown(False) def test_main(self): # Start a Span and let the callback-chain # finish it when the task is done with self.tracer.start_active_span("one", finish_on_close=False): self.submit() # Cannot shutdown the executor and wait for the callbacks # to be run, as in such case only the first will be executed, # and the rest will get canceled. 
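        # Instead, poll until the last callback in the chain has
        # finished the span.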
await_until(lambda: len(self.tracer.finished_spans()) == 1, 5) spans = self.tracer.finished_spans() self.assertEqual(len(spans), 1) self.assertEqual(spans[0].name, "one") for idx in range(1, 4): self.assertEqual( spans[0].attributes.get(f"key{idx}", None), str(idx) ) def submit(self): span = self.tracer.scope_manager.active.span def task1(): with self.tracer.scope_manager.activate(span, False): span.set_tag("key1", "1") def task2(): with self.tracer.scope_manager.activate(span, False): span.set_tag("key2", "2") def task3(): with self.tracer.scope_manager.activate( span, True ): span.set_tag("key3", "3") self.executor.submit(task3) self.executor.submit(task2) self.executor.submit(task1) test_subtask_span_propagation/000077500000000000000000000000001511654350100357765ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbedREADME.rst000066400000000000000000000032021511654350100374620ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_subtask_span_propagation Subtask Span propagation example. ================================= This example shows an active ``Span`` being simply propagated to the subtasks (either threads or coroutines), and finished **by** the parent task. In real-life scenarios instrumentation libraries may help with ``Span`` propagation **if** it is not offered by default (see implementation details below), but here we show the case without such help. Implementation details: * For ``threading``, the ``Span`` is manually passed down the call chain, activating it in each task. * For ``asyncio``, the active ``Span`` is not passed nor activated down the chain as the ``Context`` automatically propagates it. ``threading`` implementation: .. code-block:: python def parent_task(self, message): with self.tracer.start_active_span("parent") as scope: f = self.executor.submit(self.child_task, message, scope.span) res = f.result() return res def child_task(self, message, span): with self.tracer.scope_manager.activate(span, False): with self.tracer.start_active_span("child"): return "%s::response" % message ``asyncio`` implementation: .. code-block:: python async def parent_task(self, message): # noqa with self.tracer.start_active_span("parent"): res = await self.child_task(message) return res async def child_task(self, message): # No need to pass/activate the parent Span, as it stays in the context. with self.tracer.start_active_span("child"): return "%s::response" % message __init__.py000066400000000000000000000000001511654350100400750ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_subtask_span_propagationtest_asyncio.py000066400000000000000000000031401511654350100410520ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_subtask_span_propagation# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
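# Note: in this asyncio variant the parent span does not need to be passed or
# activated explicitly; the shim keeps it in the OpenTelemetry Context, which
# contextvars carries into awaited coroutines automatically (see README.rst
# above).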
import asyncio # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase class TestAsyncio(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.loop = asyncio.get_event_loop() def test_main(self): res = self.loop.run_until_complete(self.parent_task("message")) self.assertEqual(res, "message::response") spans = self.tracer.finished_spans() self.assertEqual(len(spans), 2) self.assertNamesEqual(spans, ["child", "parent"]) self.assertIsChildOf(spans[0], spans[1]) async def parent_task(self, message): # noqa with self.tracer.start_active_span("parent"): res = await self.child_task(message) return res async def child_task(self, message): # No need to pass/activate the parent Span, as it stays in the context. with self.tracer.start_active_span("child"): return f"{message}::response" test_threads.py000066400000000000000000000032751511654350100410500ustar00rootroot00000000000000python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/test_subtask_span_propagation# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from concurrent.futures import ThreadPoolExecutor # pylint: disable=import-error from ..otel_ot_shim_tracer import MockTracer from ..testcase import OpenTelemetryTestCase class TestThreads(OpenTelemetryTestCase): def setUp(self): # pylint: disable=invalid-name self.tracer = MockTracer() self.executor = ThreadPoolExecutor(max_workers=3) def test_main(self): res = self.executor.submit(self.parent_task, "message").result() self.assertEqual(res, "message::response") spans = self.tracer.finished_spans() self.assertEqual(len(spans), 2) self.assertNamesEqual(spans, ["child", "parent"]) self.assertIsChildOf(spans[0], spans[1]) def parent_task(self, message): with self.tracer.start_active_span("parent") as scope: fut = self.executor.submit(self.child_task, message, scope.span) res = fut.result() return res def child_task(self, message, span): with self.tracer.scope_manager.activate(span, False): with self.tracer.start_active_span("child"): return f"{message}::response" python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/testcase.py000066400000000000000000000025411511654350100321050ustar00rootroot00000000000000import unittest import opentelemetry.trace as trace_api # pylint: disable=C0103 class OpenTelemetryTestCase(unittest.TestCase): def assertSameTrace(self, spanA, spanB): return self.assertEqual(spanA.context.trace_id, spanB.context.trace_id) def assertNotSameTrace(self, spanA, spanB): return self.assertNotEqual( spanA.context.trace_id, spanB.context.trace_id ) def assertIsChildOf(self, spanA, spanB): # spanA is child of spanB self.assertIsNotNone(spanA.parent) ctxA = spanA.parent if not isinstance(ctxA, trace_api.SpanContext): ctxA = spanA.parent.context ctxB = spanB if not isinstance(ctxB, trace_api.SpanContext): ctxB = spanB.context return self.assertEqual(ctxA.span_id, ctxB.span_id) def assertIsNotChildOf(self, 
spanA, spanB): # spanA is NOT child of spanB if spanA.parent is None: return ctxA = spanA.parent if not isinstance(ctxA, trace_api.SpanContext): ctxA = spanA.parent.context ctxB = spanB if not isinstance(ctxB, trace_api.SpanContext): ctxB = spanB.context self.assertNotEqual(ctxA.span_id, ctxB.span_id) def assertNamesEqual(self, spans, names): self.assertEqual(list(map(lambda x: x.name, spans)), names) python-opentelemetry-1.39.1/shim/opentelemetry-opentracing-shim/tests/testbed/utils.py000066400000000000000000000034631511654350100314360ustar00rootroot00000000000000import logging import threading import time class RefCount: """Thread-safe counter""" def __init__(self, count=1): self._lock = threading.Lock() self._count = count def incr(self): with self._lock: self._count += 1 return self._count def decr(self): with self._lock: self._count -= 1 return self._count def await_until(func, timeout=5.0): """Polls for func() to return True""" end_time = time.time() + timeout while time.time() < end_time and not func(): time.sleep(0.01) def stop_loop_when(loop, cond_func, timeout=5.0): """ Registers a periodic callback that stops the loop when cond_func() == True. Compatible with both Tornado and asyncio. """ if cond_func() or timeout <= 0.0: loop.stop() return timeout -= 0.1 loop.call_later(0.1, stop_loop_when, loop, cond_func, timeout) def get_logger(name): """Returns a logger with log level set to INFO""" logging.basicConfig(level=logging.INFO) return logging.getLogger(name) def get_one_by_tag(spans, key, value): """Return a single Span with a tag value/key from a list, errors if more than one is found.""" found = [] for span in spans: if span.attributes.get(key) == value: found.append(span) if len(found) > 1: raise RuntimeError("Too many values") return found[0] if len(found) > 0 else None def get_one_by_operation_name(spans, name): """Return a single Span with a name from a list, errors if more than one is found.""" found = [] for span in spans: if span.name == name: found.append(span) if len(found) > 1: raise RuntimeError("Too many values") return found[0] if len(found) > 0 else None python-opentelemetry-1.39.1/tests/000077500000000000000000000000001511654350100171635ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-docker-tests/000077500000000000000000000000001511654350100244645ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-docker-tests/tests/000077500000000000000000000000001511654350100256265ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-docker-tests/tests/docker-compose.yml000066400000000000000000000004601511654350100312630ustar00rootroot00000000000000version: '3' services: otopencensus: image: rafaeljesus/opencensus-collector:latest command: --logging-exporter DEBUG ports: - "8888:8888" - "55678:55678" otcollector: image: otel/opentelemetry-collector:0.31.0 ports: - "4317:4317" - "4318:55681" python-opentelemetry-1.39.1/tests/opentelemetry-docker-tests/tests/opencensus/000077500000000000000000000000001511654350100300105ustar00rootroot00000000000000test_opencensusexporter_functional.py000066400000000000000000000041721511654350100375430ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-docker-tests/tests/opencensus# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry import trace from opentelemetry.context import attach, detach, set_value from opentelemetry.exporter.opencensus.trace_exporter import ( OpenCensusSpanExporter, ) from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor from opentelemetry.test.test_base import TestBase class ExportStatusSpanProcessor(SimpleSpanProcessor): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.export_status = [] def on_end(self, span): token = attach(set_value("suppress_instrumentation", True)) self.export_status.append(self.span_exporter.export((span,))) detach(token) class TestOpenCensusSpanExporter(TestBase): def setUp(self): super().setUp() trace.set_tracer_provider(TracerProvider()) self.tracer = trace.get_tracer(__name__) self.span_processor = ExportStatusSpanProcessor( OpenCensusSpanExporter(endpoint="localhost:55678") ) trace.get_tracer_provider().add_span_processor(self.span_processor) def test_export(self): with self.tracer.start_as_current_span("foo"): with self.tracer.start_as_current_span("bar"): with self.tracer.start_as_current_span("baz"): pass self.assertEqual(len(self.span_processor.export_status), 3) for export_status in self.span_processor.export_status: self.assertEqual(export_status.name, "SUCCESS") self.assertEqual(export_status.value, 0) python-opentelemetry-1.39.1/tests/opentelemetry-docker-tests/tests/otlpexporter/000077500000000000000000000000001511654350100303755ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-docker-tests/tests/otlpexporter/__init__.py000066400000000000000000000032631511654350100325120ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
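# Shared pieces for the OTLP exporter functional tests below: a span
# processor that exports each span synchronously while instrumentation is
# suppressed, recording every export result, and a base class whose
# test_export() asserts that each recorded result is SUCCESS.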
from abc import ABC, abstractmethod from opentelemetry.context import attach, detach, set_value from opentelemetry.sdk.trace.export import SimpleSpanProcessor class ExportStatusSpanProcessor(SimpleSpanProcessor): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.export_status = [] def on_end(self, span): token = attach(set_value("suppress_instrumentation", True)) self.export_status.append(self.span_exporter.export((span,))) detach(token) class BaseTestOTLPExporter(ABC): @abstractmethod def get_span_processor(self): pass # pylint: disable=no-member def test_export(self): with self.tracer.start_as_current_span("foo"): with self.tracer.start_as_current_span("bar"): with self.tracer.start_as_current_span("baz"): pass self.assertEqual(len(self.span_processor.export_status), 3) for export_status in self.span_processor.export_status: self.assertEqual(export_status.name, "SUCCESS") self.assertEqual(export_status.value, 0) test_otlp_grpc_exporter_functional.py000066400000000000000000000025571511654350100401030ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-docker-tests/tests/otlpexporter# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.sdk.trace import TracerProvider from opentelemetry.test.test_base import TestBase from . import BaseTestOTLPExporter, ExportStatusSpanProcessor class TestOTLPGRPCExporter(BaseTestOTLPExporter, TestBase): # pylint: disable=no-self-use def get_span_processor(self): return ExportStatusSpanProcessor( OTLPSpanExporter(insecure=True, timeout=1) ) def setUp(self): super().setUp() trace.set_tracer_provider(TracerProvider()) self.tracer = trace.get_tracer(__name__) self.span_processor = self.get_span_processor() trace.get_tracer_provider().add_span_processor(self.span_processor) test_otlp_http_exporter_functional.py000066400000000000000000000025011511654350100401140ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-docker-tests/tests/otlpexporter# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from opentelemetry import trace from opentelemetry.exporter.otlp.proto.http.trace_exporter import ( OTLPSpanExporter, ) from opentelemetry.sdk.trace import TracerProvider from opentelemetry.test.test_base import TestBase from . 
import BaseTestOTLPExporter, ExportStatusSpanProcessor class TestOTLPHTTPExporter(BaseTestOTLPExporter, TestBase): # pylint: disable=no-self-use def get_span_processor(self): return ExportStatusSpanProcessor(OTLPSpanExporter()) def setUp(self): super().setUp() trace.set_tracer_provider(TracerProvider()) self.tracer = trace.get_tracer(__name__) self.span_processor = self.get_span_processor() trace.get_tracer_provider().add_span_processor(self.span_processor) python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/000077500000000000000000000000001511654350100241725ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/README.rst000066400000000000000000000006561511654350100256700ustar00rootroot00000000000000OpenTelemetry Test Utilities ============================ This package provides internal testing utilities for the OpenTelemetry Python project and provides no stability or quality guarantees. Please do not use it for anything other than writing or running tests for the OpenTelemetry Python project (github.com/open-telemetry/opentelemetry-python). References ---------- * `OpenTelemetry Project <https://opentelemetry.io/>`_ python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/pyproject.toml000066400000000000000000000024111511654350100271040ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "opentelemetry-test-utils" dynamic = ["version"] description = "Test utilities for OpenTelemetry unit tests" readme = "README.rst" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "OpenTelemetry Authors", email = "cncf-opentelemetry-contributors@lists.cncf.io" }, ] classifiers = [ "Development Status :: 4 - Beta", "Framework :: OpenTelemetry", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ] dependencies = [ "asgiref ~= 3.0", "opentelemetry-api == 1.39.1", "opentelemetry-sdk == 1.39.1", ] [project.urls] Homepage = "https://github.com/open-telemetry/opentelemetry-python/tests/opentelemetry-test-utils" Repository = "https://github.com/open-telemetry/opentelemetry-python" [tool.hatch.version] path = "src/opentelemetry/test/version/__init__.py" [tool.hatch.build.targets.sdist] include = [ "/src", ] [tool.hatch.build.targets.wheel] packages = ["src/opentelemetry"] python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/000077500000000000000000000000001511654350100247615ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/000077500000000000000000000000001511654350100276555ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/000077500000000000000000000000001511654350100306345ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/__init__.py000066400000000000000000000032451511654350100327510ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore from traceback import format_tb from unittest import TestCase class _AssertNotRaisesMixin: class _AssertNotRaises: def __init__(self, test_case): self._test_case = test_case def __enter__(self): return self def __exit__(self, type_, value, tb): # pylint: disable=invalid-name if value is not None and type_ in self._exception_types: self._test_case.fail( "Unexpected exception was raised:\n{}".format( "\n".join(format_tb(tb)) ) ) return True def __call__(self, exception, *exceptions): # pylint: disable=attribute-defined-outside-init self._exception_types = (exception, *exceptions) return self def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # pylint: disable=invalid-name self.assertNotRaises = self._AssertNotRaises(self) class TestCase(_AssertNotRaisesMixin, TestCase): # pylint: disable=function-redefined pass python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/asgitestutil.py000066400000000000000000000063121511654350100337310ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
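# Test bases for ASGI instrumentation: they wrap an application in asgiref's
# ApplicationCommunicator, feed it "http.request" messages, and drain the
# messages the app sends back for assertions.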
import asyncio from unittest import IsolatedAsyncioTestCase from asgiref.testing import ApplicationCommunicator from opentelemetry.test.test_base import TestBase def setup_testing_defaults(scope): scope.update( { "client": ("127.0.0.1", 32767), "headers": [], "http_version": "1.0", "method": "GET", "path": "/", "query_string": b"", "scheme": "http", "server": ("127.0.0.1", 80), "type": "http", } ) class AsgiTestBase(TestBase): def setUp(self): super().setUp() self.scope = {} setup_testing_defaults(self.scope) self.communicator = None def tearDown(self): if self.communicator: asyncio.get_event_loop().run_until_complete( self.communicator.wait() ) def seed_app(self, app): self.communicator = ApplicationCommunicator(app, self.scope) def send_input(self, message): asyncio.get_event_loop().run_until_complete( self.communicator.send_input(message) ) def send_default_request(self): self.send_input({"type": "http.request", "body": b""}) def get_output(self): output = asyncio.get_event_loop().run_until_complete( self.communicator.receive_output(0) ) return output def get_all_output(self): outputs = [] while True: try: outputs.append(self.get_output()) except asyncio.TimeoutError: break return outputs class AsyncAsgiTestBase(TestBase, IsolatedAsyncioTestCase): def setUp(self): super().setUp() self.scope = {} setup_testing_defaults(self.scope) self.communicator = None def tearDown(self): if self.communicator: asyncio.get_event_loop().run_until_complete( self.communicator.wait() ) def seed_app(self, app): self.communicator = ApplicationCommunicator(app, self.scope) async def send_input(self, message): await self.communicator.send_input(message) async def send_default_request(self): await self.send_input({"type": "http.request", "body": b""}) async def get_output(self, timeout=0.01): return await self.communicator.receive_output(timeout) async def get_all_output(self, timeout=0.01): outputs = [] while True: try: outputs.append(await self.communicator.receive_output(timeout)) except asyncio.TimeoutError: break return outputs concurrency_test.py000066400000000000000000000053101511654350100345170ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import threading import unittest from functools import partial from typing import Callable, List, Optional, TypeVar from unittest.mock import Mock ReturnT = TypeVar("ReturnT") class MockFunc: """A thread safe mock function Use this as part of your mock if you want to count calls across multiple threads. """ def __init__(self) -> None: self.lock = threading.Lock() self.call_count = 0 self.mock = Mock() def __call__(self, *args, **kwargs): with self.lock: self.call_count += 1 return self.mock class ConcurrencyTestBase(unittest.TestCase): """Test base class/mixin for tests of concurrent code This test class calls ``sys.setswitchinterval(1e-12)`` to try to create more contention while running tests that use many threads. 
It also provides ``run_with_many_threads`` to run some test code in many threads concurrently. """ orig_switch_interval = sys.getswitchinterval() @classmethod def setUpClass(cls) -> None: super().setUpClass() # switch threads more often to increase chance of contention sys.setswitchinterval(1e-12) @classmethod def tearDownClass(cls) -> None: super().tearDownClass() sys.setswitchinterval(cls.orig_switch_interval) @staticmethod def run_with_many_threads( func_to_test: Callable[[], ReturnT], num_threads: int = 100, ) -> List[ReturnT]: """Util to run ``func_to_test`` in ``num_threads`` concurrently""" barrier = threading.Barrier(num_threads) results: List[Optional[ReturnT]] = [None] * num_threads def thread_start(idx: int) -> None: nonlocal results # Get all threads here before releasing them to create contention barrier.wait() results[idx] = func_to_test() threads = [ threading.Thread(target=partial(thread_start, i)) for i in range(num_threads) ] for thread in threads: thread.start() for thread in threads: thread.join() return results # type: ignore python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/globals_test.py000066400000000000000000000074351511654350100337010ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
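# The reset_*_globals() helpers below restore the process-wide provider
# singletons (and their set-once guards) to a pristine state, so tests that
# call set_tracer_provider() and friends do not leak state into each other.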
import unittest from opentelemetry import trace as trace_api from opentelemetry._logs import _internal as logging_api from opentelemetry.metrics import _internal as metrics_api from opentelemetry.metrics._internal import _ProxyMeterProvider from opentelemetry.util._once import Once # pylint: disable=protected-access def reset_trace_globals() -> None: """WARNING: only use this for tests.""" trace_api._TRACER_PROVIDER_SET_ONCE = Once() trace_api._TRACER_PROVIDER = None trace_api._PROXY_TRACER_PROVIDER = trace_api.ProxyTracerProvider() # pylint: disable=protected-access def reset_metrics_globals() -> None: """WARNING: only use this for tests.""" metrics_api._METER_PROVIDER_SET_ONCE = Once() # type: ignore[attr-defined] metrics_api._METER_PROVIDER = None # type: ignore[attr-defined] metrics_api._PROXY_METER_PROVIDER = _ProxyMeterProvider() # type: ignore[attr-defined] # pylint: disable=protected-access def reset_logging_globals() -> None: """WARNING: only use this for tests.""" logging_api._LOGGER_PROVIDER_SET_ONCE = Once() # type: ignore[attr-defined] logging_api._LOGGER_PROVIDER = None # type: ignore[attr-defined] logging_api._PROXY_LOGGER_PROVIDER = logging_api.ProxyLoggerProvider() # type: ignore[attr-defined] # pylint: disable=protected-access def reset_event_globals() -> None: """WARNING: only use this for tests.""" from opentelemetry import ( # pylint: disable=import-outside-toplevel # noqa: PLC0415 _events as events_api, ) events_api._EVENT_LOGGER_PROVIDER_SET_ONCE = Once() # type: ignore[attr-defined] events_api._EVENT_LOGGER_PROVIDER = None # type: ignore[attr-defined] events_api._PROXY_EVENT_LOGGER_PROVIDER = ( events_api.ProxyEventLoggerProvider() ) # type: ignore[attr-defined] class TraceGlobalsTest(unittest.TestCase): """Resets trace API globals in setUp/tearDown Use as a base class or mixin for your test that modifies trace API globals. """ def setUp(self) -> None: super().setUp() reset_trace_globals() def tearDown(self) -> None: super().tearDown() reset_trace_globals() class MetricsGlobalsTest(unittest.TestCase): """Resets metrics API globals in setUp/tearDown Use as a base class or mixin for your test that modifies metrics API globals. """ def setUp(self) -> None: super().setUp() reset_metrics_globals() def tearDown(self) -> None: super().tearDown() reset_metrics_globals() class LoggingGlobalsTest(unittest.TestCase): """Resets logging API globals in setUp/tearDown Use as a base class or mixin for your test that modifies logging API globals. """ def setUp(self) -> None: super().setUp() reset_logging_globals() def tearDown(self) -> None: super().tearDown() reset_logging_globals() class EventsGlobalsTest(unittest.TestCase): """Resets events API globals in setUp/tearDown Use as a base class or mixin for your test that modifies events API globals. """ def setUp(self) -> None: super().setUp() reset_event_globals() def tearDown(self) -> None: super().tearDown() reset_event_globals() python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/httptest.py000066400000000000000000000042671511654350100330740ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import unittest from http import HTTPStatus from http.server import BaseHTTPRequestHandler, HTTPServer from threading import Thread class HttpTestBase(unittest.TestCase): DEFAULT_RESPONSE = b"Hello!" class Handler(BaseHTTPRequestHandler): protocol_version = "HTTP/1.1" # Support keep-alive. timeout = 3 # Seconds STATUS_RE = re.compile(r"/status/(\d+)") def do_GET(self): # pylint:disable=invalid-name status_match = self.STATUS_RE.fullmatch(self.path) status = 200 if status_match: status = int(status_match.group(1)) if status == 200: body = HttpTestBase.DEFAULT_RESPONSE self.send_response(HTTPStatus.OK) self.send_header("Content-Length", str(len(body))) self.end_headers() self.wfile.write(body) else: self.send_error(status) @classmethod def create_server(cls): server_address = ("127.0.0.1", 0) # Only bind to localhost. return HTTPServer(server_address, cls.Handler) @classmethod def run_server(cls): httpd = cls.create_server() worker = Thread( target=httpd.serve_forever, daemon=True, name="Test server worker" ) worker.start() return worker, httpd @classmethod def setUpClass(cls): super().setUpClass() cls.server_thread, cls.server = cls.run_server() @classmethod def tearDownClass(cls): cls.server.shutdown() cls.server_thread.join() super().tearDownClass() python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/metrictestutil.py000066400000000000000000000071561511654350100343000ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
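# Factory helpers that build ready-made metric data (Sum, Gauge, and
# Histogram points wrapped in Metric objects) so exporter tests can assert
# on encoding without running a full MeterProvider pipeline.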
from typing import Optional

from opentelemetry.attributes import BoundedAttributes
from opentelemetry.sdk.metrics.export import (
    AggregationTemporality,
    Gauge,
    Histogram,
    HistogramDataPoint,
    Metric,
    NumberDataPoint,
    Sum,
)
from opentelemetry.util.types import Attributes


def _generate_metric(
    name, data, attributes=None, description=None, unit=None
) -> Metric:
    if description is None:
        description = "foo"
    if unit is None:
        unit = "s"
    return Metric(
        name=name,
        description=description,
        unit=unit,
        data=data,
    )


def _generate_sum(
    name,
    value,
    attributes=None,
    description=None,
    unit=None,
    is_monotonic=True,
) -> Metric:
    if attributes is None:
        attributes = BoundedAttributes(attributes={"a": 1, "b": True})
    return _generate_metric(
        name,
        Sum(
            data_points=[
                NumberDataPoint(
                    attributes=attributes,
                    start_time_unix_nano=1641946015139533244,
                    time_unix_nano=1641946016139533244,
                    value=value,
                )
            ],
            aggregation_temporality=AggregationTemporality.CUMULATIVE,
            is_monotonic=is_monotonic,
        ),
        description=description,
        unit=unit,
    )


def _generate_gauge(
    name, value, attributes=None, description=None, unit=None
) -> Metric:
    if attributes is None:
        attributes = BoundedAttributes(attributes={"a": 1, "b": True})
    return _generate_metric(
        name,
        Gauge(
            data_points=[
                NumberDataPoint(
                    attributes=attributes,
                    start_time_unix_nano=None,
                    time_unix_nano=1641946016139533244,
                    value=value,
                )
            ],
        ),
        description=description,
        unit=unit,
    )


def _generate_unsupported_metric(
    name, attributes=None, description=None, unit=None
) -> Metric:
    return _generate_metric(
        name,
        None,
        description=description,
        unit=unit,
    )


def _generate_histogram(
    name: str,
    attributes: Attributes = None,
    description: Optional[str] = None,
    unit: Optional[str] = None,
) -> Metric:
    if attributes is None:
        attributes = BoundedAttributes(attributes={"a": 1, "b": True})
    return _generate_metric(
        name,
        Histogram(
            data_points=[
                HistogramDataPoint(
                    attributes=attributes,
                    start_time_unix_nano=1641946016139533244,
                    time_unix_nano=1641946016139533244,
                    count=6,
                    sum=579.0,
                    bucket_counts=[1, 3, 2],
                    explicit_bounds=[123.0, 456.0],
                    min=1,
                    max=457,
                )
            ],
            aggregation_temporality=AggregationTemporality.CUMULATIVE,
        ),
        description=description,
        unit=unit,
    )

mock_test_classes.py000066400000000000000000000013741511654350100346410ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class IterEntryPoint:
    def __init__(self, name, class_type):
        self.name = name
        self.class_type = class_type

    def load(self):
        return self.class_type

python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/mock_textmap.py000066400000000000000000000055231511654350100337060ustar00rootroot00000000000000
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import typing

from opentelemetry import trace
from opentelemetry.context import Context
from opentelemetry.propagators.textmap import (
    CarrierT,
    Getter,
    Setter,
    TextMapPropagator,
    default_getter,
    default_setter,
)


class NOOPTextMapPropagator(TextMapPropagator):
    """A propagator that does not extract nor inject.

    This class is useful for catching edge cases assuming
    a SpanContext will always be present.
    """

    def extract(
        self,
        carrier: CarrierT,
        context: typing.Optional[Context] = None,
        getter: Getter = default_getter,
    ) -> Context:
        return Context()

    def inject(
        self,
        carrier: CarrierT,
        context: typing.Optional[Context] = None,
        setter: Setter = default_setter,
    ) -> None:
        return None

    @property
    def fields(self):
        return set()


class MockTextMapPropagator(TextMapPropagator):
    """Mock propagator for testing purposes."""

    TRACE_ID_KEY = "mock-traceid"
    SPAN_ID_KEY = "mock-spanid"

    def extract(
        self,
        carrier: CarrierT,
        context: typing.Optional[Context] = None,
        getter: Getter = default_getter,
    ) -> Context:
        if context is None:
            context = Context()
        trace_id_list = getter.get(carrier, self.TRACE_ID_KEY)
        span_id_list = getter.get(carrier, self.SPAN_ID_KEY)
        if not trace_id_list or not span_id_list:
            return context

        return trace.set_span_in_context(
            trace.NonRecordingSpan(
                trace.SpanContext(
                    trace_id=int(trace_id_list[0]),
                    span_id=int(span_id_list[0]),
                    is_remote=True,
                )
            ),
            context,
        )

    def inject(
        self,
        carrier: CarrierT,
        context: typing.Optional[Context] = None,
        setter: Setter = default_setter,
    ) -> None:
        span = trace.get_current_span(context)
        setter.set(
            carrier, self.TRACE_ID_KEY, str(span.get_span_context().trace_id)
        )
        setter.set(
            carrier, self.SPAN_ID_KEY, str(span.get_span_context().span_id)
        )

    @property
    def fields(self):
        return {self.TRACE_ID_KEY, self.SPAN_ID_KEY}

python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/spantestutil.py000066400000000000000000000036121511654350100337470ustar00rootroot00000000000000
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
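# Usage sketch for MockTextMapPropagator above (mock_textmap.py). A minimal,
# illustrative round trip -- the dict carrier and the id values are arbitrary
# assumptions for the example; kept commented out so it does not run on
# import:
#
#     from opentelemetry import trace
#     from opentelemetry.test.mock_textmap import MockTextMapPropagator
#
#     propagator = MockTextMapPropagator()
#     context = propagator.extract({"mock-traceid": "1", "mock-spanid": "2"})
#     span = trace.get_current_span(context)
#     assert span.get_span_context().trace_id == 1
#
#     carrier = {}
#     propagator.inject(carrier, context)
#     assert carrier == {"mock-traceid": "1", "mock-spanid": "2"}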
from functools import partial

from opentelemetry import trace as trace_api
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace import Resource


def new_tracer(span_limits=None, resource=None) -> trace_api.Tracer:
    provider_factory = trace_sdk.TracerProvider
    if resource is not None:
        provider_factory = partial(provider_factory, resource=resource)
    return provider_factory(span_limits=span_limits).get_tracer(__name__)


def get_span_with_dropped_attributes_events_links():
    attributes = {}
    for index in range(130):
        attributes[f"key{index}"] = [f"value{index}"]
    links = []
    for index in range(129):
        links.append(
            trace_api.Link(
                trace_sdk._Span(
                    name=f"span{index}",
                    context=trace_api.INVALID_SPAN_CONTEXT,
                    attributes=attributes,
                ).get_span_context(),
                attributes=attributes,
            )
        )

    tracer = new_tracer(
        span_limits=trace_sdk.SpanLimits(),
        resource=Resource(attributes=attributes),
    )
    with tracer.start_as_current_span(
        "span", links=links, attributes=attributes
    ) as span:
        for index in range(131):
            span.add_event(f"event{index}", attributes=attributes)
        return span

python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/test_base.py000066400000000000000000000224451511654350100331660ustar00rootroot00000000000000
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import unittest
from contextlib import contextmanager
from typing import Optional, Sequence, Tuple

from opentelemetry import metrics as metrics_api
from opentelemetry import trace as trace_api
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics._internal.aggregation import (
    _DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES,
)
from opentelemetry.sdk.metrics._internal.point import Metric
from opentelemetry.sdk.metrics.export import (
    DataPointT,
    HistogramDataPoint,
    InMemoryMetricReader,
    MetricReader,
    NumberDataPoint,
)
from opentelemetry.sdk.trace import TracerProvider, export
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
    InMemorySpanExporter,
)
from opentelemetry.test.globals_test import (
    reset_metrics_globals,
    reset_trace_globals,
)


class TestBase(unittest.TestCase):
    # pylint: disable=C0103

    def setUp(self):
        super().setUp()
        result = self.create_tracer_provider()
        self.tracer_provider, self.memory_exporter = result
        # This is done because set_tracer_provider cannot override the
        # current tracer provider.
        reset_trace_globals()
        trace_api.set_tracer_provider(self.tracer_provider)

        self.memory_exporter.clear()
        # This is done because set_meter_provider cannot override the
        # current meter provider.
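        # (Both resets are mirrored in tearDown below, so providers
        # configured by one test cannot leak into the next test.)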
        reset_metrics_globals()
        (
            self.meter_provider,
            self.memory_metrics_reader,
        ) = self.create_meter_provider()
        metrics_api.set_meter_provider(self.meter_provider)

    def tearDown(self):
        super().tearDown()
        reset_trace_globals()
        reset_metrics_globals()

    def get_finished_spans(self):
        return FinishedTestSpans(
            self, self.memory_exporter.get_finished_spans()
        )

    def assertEqualSpanInstrumentationScope(self, span, module):
        self.assertEqual(span.instrumentation_scope.name, module.__name__)
        self.assertEqual(
            span.instrumentation_scope.version, module.__version__
        )

    def assertSpanHasAttributes(self, span, attributes):
        for key, val in attributes.items():
            self.assertIn(key, span.attributes)
            self.assertEqual(val, span.attributes[key])

    def sorted_spans(self, spans):  # pylint: disable=R0201
        """
        Sorts spans by span creation time.

        Note: This method should not be used to sort spans in a deterministic
        way as the order depends on timing precision provided by the platform.
        """
        return sorted(
            spans,
            key=lambda s: s._start_time,  # pylint: disable=W0212
            reverse=True,
        )

    @staticmethod
    def create_tracer_provider(**kwargs):
        """Helper to create a configured tracer provider.

        Creates and configures a `TracerProvider` with a
        `SimpleSpanProcessor` and a `InMemorySpanExporter`.
        All the parameters passed are forwarded to the TracerProvider
        constructor.

        Returns:
            A tuple with the tracer provider in the first element and the
            in-memory span exporter in the second.
        """
        tracer_provider = TracerProvider(**kwargs)
        memory_exporter = InMemorySpanExporter()
        span_processor = export.SimpleSpanProcessor(memory_exporter)
        tracer_provider.add_span_processor(span_processor)

        return tracer_provider, memory_exporter

    @staticmethod
    def create_meter_provider(**kwargs) -> Tuple[MeterProvider, MetricReader]:
        """Helper to create a configured meter provider

        Creates a `MeterProvider` and an `InMemoryMetricReader`.

        Returns:
            A tuple with the meter provider in the first element and the
            in-memory metric reader in the second.
        """
        memory_reader = InMemoryMetricReader()
        metric_readers = kwargs.get("metric_readers", [])
        metric_readers.append(memory_reader)
        kwargs["metric_readers"] = metric_readers
        meter_provider = MeterProvider(**kwargs)
        return meter_provider, memory_reader

    @staticmethod
    @contextmanager
    def disable_logging(highest_level=logging.CRITICAL):
        logging.disable(highest_level)

        try:
            yield
        finally:
            logging.disable(logging.NOTSET)

    def get_sorted_metrics(self):
        metrics_data = self.memory_metrics_reader.get_metrics_data()
        resource_metrics = (
            metrics_data.resource_metrics if metrics_data else []
        )

        all_metrics = []
        for metrics in resource_metrics:
            for scope_metrics in metrics.scope_metrics:
                all_metrics.extend(scope_metrics.metrics)

        return self.sorted_metrics(all_metrics)

    @staticmethod
    def sorted_metrics(metrics):
        """
        Sorts metrics by metric name.
        """
        return sorted(
            metrics,
            key=lambda m: m.name,
        )

    def assert_metric_expected(
        self,
        metric: Metric,
        expected_data_points: Sequence[DataPointT],
        est_value_delta: Optional[float] = 0,
    ):
        self.assertEqual(
            len(expected_data_points), len(metric.data.data_points)
        )

        for expected_data_point in expected_data_points:
            self.assert_data_point_expected(
                expected_data_point, metric.data.data_points, est_value_delta
            )

    # pylint: disable=unidiomatic-typecheck
    @staticmethod
    def is_data_points_equal(
        expected_data_point: DataPointT,
        data_point: DataPointT,
        est_value_delta: Optional[float] = 0,
    ):
        if type(expected_data_point) != type(  # noqa: E721
            data_point
        ) or not isinstance(
            expected_data_point, (HistogramDataPoint, NumberDataPoint)
        ):
            return False

        values_diff = None
        if isinstance(data_point, NumberDataPoint):
            values_diff = abs(expected_data_point.value - data_point.value)
        elif isinstance(data_point, HistogramDataPoint):
            values_diff = abs(expected_data_point.sum - data_point.sum)
            if expected_data_point.count != data_point.count or (
                est_value_delta == 0
                and (
                    expected_data_point.min != data_point.min
                    or expected_data_point.max != data_point.max
                )
            ):
                return False
            if (
                expected_data_point.explicit_bounds
                != data_point.explicit_bounds
            ):
                return False

        return (
            values_diff <= est_value_delta
            and expected_data_point.attributes == dict(data_point.attributes)
        )

    def assert_data_point_expected(
        self,
        expected_data_point: DataPointT,
        data_points: Sequence[DataPointT],
        est_value_delta: Optional[float] = 0,
    ):
        is_data_point_exist = False
        for data_point in data_points:
            if self.is_data_points_equal(
                expected_data_point, data_point, est_value_delta
            ):
                is_data_point_exist = True
                break

        self.assertTrue(
            is_data_point_exist,
            msg=f"Data point {expected_data_point} does not exist",
        )

    @staticmethod
    def create_number_data_point(value, attributes):
        return NumberDataPoint(
            value=value,
            attributes=attributes,
            start_time_unix_nano=0,
            time_unix_nano=0,
        )

    @staticmethod
    def create_histogram_data_point(
        sum_data_point,
        count,
        max_data_point,
        min_data_point,
        attributes,
        explicit_bounds=None,
    ):
        return HistogramDataPoint(
            count=count,
            sum=sum_data_point,
            min=min_data_point,
            max=max_data_point,
            attributes=attributes,
            start_time_unix_nano=0,
            time_unix_nano=0,
            bucket_counts=[],
            explicit_bounds=explicit_bounds
            if explicit_bounds is not None
            else _DEFAULT_EXPLICIT_BUCKET_HISTOGRAM_AGGREGATION_BOUNDARIES,
        )


class FinishedTestSpans(list):
    def __init__(self, test, spans):
        super().__init__(spans)
        self.test = test

    def by_name(self, name):
        for span in self:
            if span.name == name:
                return span
        self.test.fail(f"Did not find span with name {name}")
        return None

    def by_attr(self, key, value):
        for span in self:
            if span.attributes.get(key) == value:
                return span
        self.test.fail(f"Did not find span with attrs {key}={value}")
        return None

python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/version/000077500000000000000000000000001511654350100323215ustar00rootroot00000000000000
__init__.py000066400000000000000000000000271511654350100343520ustar00rootroot00000000000000python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/version
__version__ = "0.60b1"

python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/src/opentelemetry/test/wsgitestutil.py000066400000000000000000000034061511654350100337600ustar00rootroot00000000000000
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import wsgiref.util as wsgiref_util

from opentelemetry import trace
from opentelemetry.test.test_base import TestBase


class WsgiTestBase(TestBase):
    def setUp(self):
        super().setUp()

        self.write_buffer = io.BytesIO()
        self.write = self.write_buffer.write

        self.environ = {}
        wsgiref_util.setup_testing_defaults(self.environ)
        self.status = None
        self.response_headers = None
        self.exc_info = None

    def start_response(self, status, response_headers, exc_info=None):
        self.status = status
        self.response_headers = response_headers
        self.exc_info = exc_info
        return self.write

    def assertTraceResponseHeaderMatchesSpan(self, headers, span):  # pylint: disable=invalid-name
        self.assertIn("traceresponse", headers)
        self.assertEqual(
            headers["access-control-expose-headers"],
            "traceresponse",
        )

        trace_id = trace.format_trace_id(span.get_span_context().trace_id)
        span_id = trace.format_span_id(span.get_span_context().span_id)
        self.assertEqual(
            f"00-{trace_id}-{span_id}-01",
            headers["traceresponse"],
        )

python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/test-requirements.txt000066400000000000000000000004551511654350100304370ustar00rootroot00000000000000
asgiref==3.7.2
importlib-metadata==6.11.0
iniconfig==2.0.0
packaging==24.0
pluggy==1.5.0
py-cpuinfo==9.0.0
pytest==7.4.4
tomli==2.0.1
typing_extensions==4.10.0
wrapt==1.16.0
zipp==3.19.2
-e opentelemetry-api
-e opentelemetry-sdk
-e opentelemetry-semantic-conventions
-e tests/opentelemetry-test-utils

python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/tests/000077500000000000000000000000001511654350100253345ustar00rootroot00000000000000
python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/tests/__init__.py000066400000000000000000000011101511654350100274360ustar00rootroot00000000000000
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/tests/test_base.py000066400000000000000000000014561511654350100276650ustar00rootroot00000000000000
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
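# Usage sketch for WsgiTestBase above (wsgitestutil.py). A minimal,
# illustrative example -- the simple_app callable is an assumption made up
# for the sketch; kept commented out so it does not run on import:
#
#     class ExampleWsgiTest(WsgiTestBase):
#         def test_simple_app(self):
#             def simple_app(environ, start_response):
#                 start_response("200 OK", [("Content-Type", "text/plain")])
#                 return [b"ok"]
#
#             body = simple_app(self.environ, self.start_response)
#             self.assertEqual(self.status, "200 OK")
#             self.assertEqual(list(body), [b"ok"])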
from opentelemetry.test.test_base import TestBase


class TestBaseTestCase(TestBase):
    def test_get_sorted_metrics_works_without_metrics(self):
        metrics = self.get_sorted_metrics()
        self.assertEqual(metrics, [])

python-opentelemetry-1.39.1/tests/opentelemetry-test-utils/tests/test_utils.py000066400000000000000000000050531511654350100301100ustar00rootroot00000000000000
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from opentelemetry.test import TestCase


class TestAssertNotRaises(TestCase):
    def test_no_exception(self):
        try:
            with self.assertNotRaises(Exception):
                pass
        except Exception as error:  # pylint: disable=broad-exception-caught
            self.fail(  # pylint: disable=no-member
                f"Unexpected exception {error} was raised"
            )

    def test_no_specified_exception_single(self):
        try:
            with self.assertNotRaises(KeyError):
                1 / 0  # pylint: disable=pointless-statement
        except Exception as error:  # pylint: disable=broad-exception-caught
            self.fail(  # pylint: disable=no-member
                f"Unexpected exception {error} was raised"
            )

    def test_no_specified_exception_multiple(self):
        try:
            with self.assertNotRaises(KeyError, IndexError):
                1 / 0  # pylint: disable=pointless-statement
        except Exception as error:  # pylint: disable=broad-exception-caught
            self.fail(  # pylint: disable=no-member
                f"Unexpected exception {error} was raised"
            )

    def test_exception(self):
        with self.assertRaises(AssertionError):
            with self.assertNotRaises(ZeroDivisionError):
                1 / 0  # pylint: disable=pointless-statement

    def test_missing_exception(self):
        with self.assertRaises(AssertionError) as error:
            with self.assertNotRaises(ZeroDivisionError):

                def raise_zero_division_error():
                    raise ZeroDivisionError()

                raise_zero_division_error()

        error_lines = error.exception.args[0].split("\n")
        stripped_error_lines = [line.strip() for line in error_lines]

        self.assertIn("Unexpected exception was raised:", stripped_error_lines)
        self.assertIn("raise_zero_division_error()", stripped_error_lines)
        self.assertIn("raise ZeroDivisionError()", stripped_error_lines)

python-opentelemetry-1.39.1/tests/w3c_tracecontext_validation_server.py000066400000000000000000000046771511654350100266300ustar00rootroot00000000000000
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This server is intended to be used with the W3C tracecontext validation
Service. It implements the APIs needed to be exercised by the test bed.
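It is normally driven by scripts/tracecontext-integration-test.sh (see the
``tracecontext`` environment in tox.ini).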
""" import json import flask import requests from opentelemetry import trace from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( ConsoleSpanExporter, SimpleSpanProcessor, ) # FIXME This could likely be avoided by integrating this script into the # standard test running mechanisms. # Integrations are the glue that binds the OpenTelemetry API and the # frameworks and libraries that are used together, automatically creating # Spans and propagating context as appropriate. trace.set_tracer_provider(TracerProvider()) RequestsInstrumentor().instrument() # SpanExporter receives the spans and send them to the target location. span_processor = SimpleSpanProcessor(ConsoleSpanExporter()) trace.get_tracer_provider().add_span_processor(span_processor) app = flask.Flask(__name__) app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app) @app.route("/verify-tracecontext", methods=["POST"]) def verify_tracecontext(): """Upon reception of some payload, sends a request back to the designated url. This route is designed to be testable with the w3c tracecontext server / client test. """ for action in flask.request.json: requests.post( url=action["url"], data=json.dumps(action["arguments"]), headers={ "Accept": "application/json", "Content-Type": "application/json; charset=utf-8", }, timeout=5.0, ) return "hello" if __name__ == "__main__": try: app.run(debug=False) finally: span_processor.shutdown() python-opentelemetry-1.39.1/tox-uv.toml000066400000000000000000000001271511654350100201600ustar00rootroot00000000000000# https://docs.astral.sh/uv/reference/settings/#pip_no-sources [pip] no-sources = true python-opentelemetry-1.39.1/tox.ini000066400000000000000000000405121511654350100173360ustar00rootroot00000000000000[tox] requires = tox-uv>=1 isolated_build = True skipsdist = True skip_missing_interpreters = True envlist = ; Environments are organized by individual package, allowing ; for specifying supported Python versions per package. 
    py3{9,10,11,12,13}-test-opentelemetry-api
    pypy3-test-opentelemetry-api
    lint-opentelemetry-api

    py3{9,10,11,12,13}-test-opentelemetry-proto-gen-{oldest,latest}
    pypy3-test-opentelemetry-proto-gen-{oldest,latest}
    lint-opentelemetry-proto-gen-latest

    py3{9,10,11,12,13}-test-opentelemetry-sdk
    pypy3-test-opentelemetry-sdk
    lint-opentelemetry-sdk
    benchmark-opentelemetry-sdk

    py3{9,10,11,12,13}-test-opentelemetry-semantic-conventions
    pypy3-test-opentelemetry-semantic-conventions
    lint-opentelemetry-semantic-conventions

    py3{9,10,11,12,13}-test-opentelemetry-getting-started
    lint-opentelemetry-getting-started

    py3{9,10,11,12,13}-test-opentelemetry-opentracing-shim
    pypy3-test-opentelemetry-opentracing-shim
    lint-opentelemetry-opentracing-shim

    py3{9,10,11,12,13}-test-opentelemetry-opencensus-shim
    ; opencensus-shim intentionally excluded from pypy3 (grpcio install fails)
    lint-opentelemetry-opencensus-shim

    py3{9,10,11,12,13}-test-opentelemetry-exporter-opencensus
    ; exporter-opencensus intentionally excluded from pypy3
    lint-opentelemetry-exporter-opencensus

    py3{9,10,11,12,13}-test-opentelemetry-exporter-otlp-proto-common
    pypy3-test-opentelemetry-exporter-otlp-proto-common
    lint-opentelemetry-exporter-otlp-proto-common

    ; opentelemetry-exporter-otlp
    py3{9,10,11,12,13}-test-opentelemetry-exporter-otlp-combined
    ; intentionally excluded from pypy3
    lint-opentelemetry-exporter-otlp-combined

    py3{9,10,11,12,13}-test-opentelemetry-exporter-otlp-proto-grpc-{oldest,latest}
    ; intentionally excluded from pypy3
    lint-opentelemetry-exporter-otlp-proto-grpc-latest
    benchmark-opentelemetry-exporter-otlp-proto-grpc-latest

    py3{9,10,11,12,13}-test-opentelemetry-exporter-otlp-proto-http
    pypy3-test-opentelemetry-exporter-otlp-proto-http
    lint-opentelemetry-exporter-otlp-proto-http

    py3{9,10,11,12,13}-test-opentelemetry-exporter-prometheus
    pypy3-test-opentelemetry-exporter-prometheus
    lint-opentelemetry-exporter-prometheus

    ; opentelemetry-exporter-zipkin
    py3{9,10,11,12,13}-test-opentelemetry-exporter-zipkin-combined
    pypy3-test-opentelemetry-exporter-zipkin-combined
    lint-opentelemetry-exporter-zipkin-combined

    py3{9,10,11,12,13}-test-opentelemetry-exporter-zipkin-proto-http
    pypy3-test-opentelemetry-exporter-zipkin-proto-http
    lint-opentelemetry-exporter-zipkin-proto-http

    py3{9,10,11,12,13}-test-opentelemetry-exporter-zipkin-json
    pypy3-test-opentelemetry-exporter-zipkin-json
    lint-opentelemetry-exporter-zipkin-json

    py3{9,10,11,12,13}-test-opentelemetry-propagator-b3
    pypy3-test-opentelemetry-propagator-b3
    lint-opentelemetry-propagator-b3
    benchmark-opentelemetry-propagator-b3

    py3{9,10,11,12,13}-test-opentelemetry-propagator-jaeger
    pypy3-test-opentelemetry-propagator-jaeger
    lint-opentelemetry-propagator-jaeger

    py3{9,10,11,12,13}-test-opentelemetry-test-utils
    pypy3-test-opentelemetry-test-utils
    lint-opentelemetry-test-utils

    spellcheck
    tracecontext
    typecheck
    docs
    docker-tests-{otlpexporter,opencensus}
    public-symbols-check
    shellcheck
    generate-workflows
    precommit

[testenv]
deps =
    lint: -r dev-requirements.txt
    coverage: pytest
    coverage: pytest-cov
    api: -r {toxinidir}/opentelemetry-api/test-requirements.txt
    sdk: -r {toxinidir}/opentelemetry-sdk/test-requirements.txt
    benchmark-opentelemetry-sdk: -r {toxinidir}/opentelemetry-sdk/benchmark-requirements.txt
    semantic-conventions: -r {toxinidir}/opentelemetry-semantic-conventions/test-requirements.txt
    test-utils: -r {toxinidir}/tests/opentelemetry-test-utils/test-requirements.txt
    opentelemetry-proto-gen-oldest: -r {toxinidir}/opentelemetry-proto/test-requirements.oldest.txt
    opentelemetry-proto-gen-latest: -r {toxinidir}/opentelemetry-proto/test-requirements.latest.txt
    exporter-opencensus: -r {toxinidir}/exporter/opentelemetry-exporter-opencensus/test-requirements.txt
    exporter-otlp-proto-common: -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements.txt
    exporter-otlp-combined: -r {toxinidir}/exporter/opentelemetry-exporter-otlp/test-requirements.txt
    opentelemetry-exporter-otlp-proto-grpc-oldest: -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.oldest.txt
    opentelemetry-exporter-otlp-proto-grpc-latest: -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements.latest.txt
    benchmark-exporter-otlp-proto-grpc: -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmark-requirements.txt
    opentelemetry-exporter-otlp-proto-http: -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements.txt
    opentracing-shim: -r {toxinidir}/shim/opentelemetry-opentracing-shim/test-requirements.txt
    opencensus-shim: -r {toxinidir}/shim/opentelemetry-opencensus-shim/test-requirements.txt
    exporter-prometheus: -r {toxinidir}/exporter/opentelemetry-exporter-prometheus/test-requirements.txt
    exporter-zipkin-combined: -r {toxinidir}/exporter/opentelemetry-exporter-zipkin/test-requirements.txt
    exporter-zipkin-proto-http: -r {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http/test-requirements.txt
    exporter-zipkin-json: -r {toxinidir}/exporter/opentelemetry-exporter-zipkin-json/test-requirements.txt
    propagator-b3: -r {toxinidir}/propagator/opentelemetry-propagator-b3/test-requirements.txt
    benchmark-opentelemetry-propagator-b3: -r {toxinidir}/propagator/opentelemetry-propagator-b3/benchmark-requirements.txt
    propagator-jaeger: -r {toxinidir}/propagator/opentelemetry-propagator-jaeger/test-requirements.txt
    getting-started: -r {toxinidir}/docs/getting_started/tests/requirements.txt
    getting-started: {env:CONTRIB_REPO}\#egg=opentelemetry-util-http&subdirectory=util/opentelemetry-util-http
    getting-started: {env:CONTRIB_REPO}\#egg=opentelemetry-instrumentation&subdirectory=opentelemetry-instrumentation
    getting-started: {env:CONTRIB_REPO}\#egg=opentelemetry-instrumentation-requests&subdirectory=instrumentation/opentelemetry-instrumentation-requests
    getting-started: {env:CONTRIB_REPO}\#egg=opentelemetry-instrumentation-wsgi&subdirectory=instrumentation/opentelemetry-instrumentation-wsgi
    getting-started: {env:CONTRIB_REPO}\#egg=opentelemetry-instrumentation-flask&subdirectory=instrumentation/opentelemetry-instrumentation-flask

allowlist_externals =
    sh

setenv =
    ; override CONTRIB_REPO_SHA via env variable when testing other branches/commits than main
    ; i.e: CONTRIB_REPO_SHA=dde62cebffe519c35875af6d06fae053b3be65ec tox -e
    CONTRIB_REPO_SHA={env:CONTRIB_REPO_SHA:main}
    CONTRIB_REPO=git+https://github.com/open-telemetry/opentelemetry-python-contrib.git@{env:CONTRIB_REPO_SHA}
    UV_CONFIG_FILE={toxinidir}/tox-uv.toml

commands_pre =
    ; In order to get a healthy coverage report,
    ; we have to install packages in editable mode.
    coverage: python {toxinidir}/scripts/eachdist.py install --editable

commands =
    test-opentelemetry-api: pytest {toxinidir}/opentelemetry-api/tests {posargs}
    lint-opentelemetry-api: pylint {toxinidir}/opentelemetry-api

    test-opentelemetry-sdk: pytest {toxinidir}/opentelemetry-sdk/tests {posargs}
    lint-opentelemetry-sdk: pylint {toxinidir}/opentelemetry-sdk
    benchmark-opentelemetry-sdk: pytest {toxinidir}/opentelemetry-sdk/benchmarks --benchmark-json={toxinidir}/opentelemetry-sdk/sdk-benchmark.json {posargs}

    test-opentelemetry-proto-gen: pytest {toxinidir}/opentelemetry-proto/tests {posargs}
    lint-opentelemetry-proto-gen: pylint {toxinidir}/opentelemetry-proto

    test-opentelemetry-semantic-conventions: pytest {toxinidir}/opentelemetry-semantic-conventions/tests {posargs}
    lint-opentelemetry-semantic-conventions: pylint --rcfile {toxinidir}/opentelemetry-semantic-conventions/.pylintrc {toxinidir}/opentelemetry-semantic-conventions

    test-opentelemetry-getting-started: pytest {toxinidir}/docs/getting_started/tests {posargs}
    lint-opentelemetry-getting-started: pylint {toxinidir}/docs/getting_started

    test-opentelemetry-opentracing-shim: pytest {toxinidir}/shim/opentelemetry-opentracing-shim/tests {posargs}
    lint-opentelemetry-opentracing-shim: sh -c "cd shim && pylint --rcfile ../.pylintrc {toxinidir}/shim/opentelemetry-opentracing-shim"

    test-opentelemetry-opencensus-shim: pytest {toxinidir}/shim/opentelemetry-opencensus-shim/tests {posargs}
    lint-opentelemetry-opencensus-shim: sh -c "cd shim && pylint --rcfile ../.pylintrc {toxinidir}/shim/opentelemetry-opencensus-shim"

    test-opentelemetry-exporter-opencensus: pytest {toxinidir}/exporter/opentelemetry-exporter-opencensus/tests {posargs}
    lint-opentelemetry-exporter-opencensus: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-opencensus"

    test-opentelemetry-exporter-otlp-proto-common: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common/tests {posargs}
    lint-opentelemetry-exporter-otlp-proto-common: sh -c "cd exporter && pylint --prefer-stubs yes --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common"

    test-opentelemetry-exporter-otlp-combined: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp/tests {posargs}
    lint-opentelemetry-exporter-otlp-combined: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-otlp"

    test-opentelemetry-exporter-otlp-proto-grpc: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/tests {posargs}
    lint-opentelemetry-exporter-otlp-proto-grpc: sh -c "cd exporter && pylint --prefer-stubs yes --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc"
    benchmark-opentelemetry-exporter-otlp-proto-grpc: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmarks --benchmark-json=exporter-otlp-proto-grpc-benchmark.json {posargs}

    test-opentelemetry-exporter-otlp-proto-http: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/tests {posargs}
    lint-opentelemetry-exporter-otlp-proto-http: sh -c "cd exporter && pylint --prefer-stubs yes --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http"

    test-opentelemetry-exporter-prometheus: pytest {toxinidir}/exporter/opentelemetry-exporter-prometheus/tests {posargs}
    lint-opentelemetry-exporter-prometheus: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-prometheus"

    test-opentelemetry-exporter-zipkin-combined: pytest {toxinidir}/exporter/opentelemetry-exporter-zipkin/tests {posargs}
    lint-opentelemetry-exporter-zipkin-combined: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-zipkin"

    test-opentelemetry-exporter-zipkin-proto-http: pytest {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http/tests {posargs}
    lint-opentelemetry-exporter-zipkin-proto-http: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http"

    test-opentelemetry-exporter-zipkin-json: pytest {toxinidir}/exporter/opentelemetry-exporter-zipkin-json/tests {posargs}
    lint-opentelemetry-exporter-zipkin-json: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-zipkin-json"

    test-opentelemetry-propagator-b3: pytest {toxinidir}/propagator/opentelemetry-propagator-b3/tests {posargs}
    lint-opentelemetry-propagator-b3: sh -c "cd propagator && pylint --rcfile ../.pylintrc {toxinidir}/propagator/opentelemetry-propagator-b3"
    benchmark-opentelemetry-propagator-b3: pytest {toxinidir}/propagator/opentelemetry-propagator-b3/benchmarks --benchmark-json=propagator-b3-benchmark.json {posargs}

    test-opentelemetry-propagator-jaeger: pytest {toxinidir}/propagator/opentelemetry-propagator-jaeger/tests {posargs}
    lint-opentelemetry-propagator-jaeger: sh -c "cd propagator && pylint --rcfile ../.pylintrc {toxinidir}/propagator/opentelemetry-propagator-jaeger"

    test-opentelemetry-test-utils: pytest {toxinidir}/tests/opentelemetry-test-utils/tests {posargs}
    lint-opentelemetry-test-utils: sh -c "cd tests && pylint --rcfile ../.pylintrc {toxinidir}/tests/opentelemetry-test-utils"

    coverage: {toxinidir}/scripts/coverage.sh

[testenv:spellcheck]
basepython: python3
recreate = True
deps =
    codespell==2.2.6

commands =
    codespell

[testenv:docs]
basepython: python3
recreate = True
deps =
    -c {toxinidir}/dev-requirements.txt
    -r {toxinidir}/docs-requirements.txt
setenv =
    ; We need this workaround to allow generating docs for exporters that have different protobuf versions as requirement.
    ; See https://github.com/open-telemetry/opentelemetry-python/pull/4206
    ; We can remove the workaround when opentelemetry-exporter-zipkin-proto-http supports protobuf > 5.26
    PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python

changedir = docs

commands =
    sphinx-build -E -a -W -b html -T . _build/html

[testenv:tracecontext]
deps =
    # needed for tracecontext
    aiohttp~=3.6
    pytest==7.4.4
    # needed for example trace integration
    flask~=2.3
    requests~=2.7
    markupsafe~=2.1
    -e {toxinidir}/opentelemetry-api
    -e {toxinidir}/opentelemetry-semantic-conventions
    -e {toxinidir}/opentelemetry-sdk
    {env:CONTRIB_REPO}\#egg=opentelemetry-util-http&subdirectory=util/opentelemetry-util-http
    {env:CONTRIB_REPO}\#egg=opentelemetry-instrumentation&subdirectory=opentelemetry-instrumentation
    {env:CONTRIB_REPO}\#egg=opentelemetry-instrumentation-requests&subdirectory=instrumentation/opentelemetry-instrumentation-requests
    {env:CONTRIB_REPO}\#egg=opentelemetry-instrumentation-wsgi&subdirectory=instrumentation/opentelemetry-instrumentation-wsgi

allowlist_externals =
    {toxinidir}/scripts/tracecontext-integration-test.sh

commands =
    {toxinidir}/scripts/tracecontext-integration-test.sh

[testenv:docker-tests-{otlpexporter,opencensus}]
deps =
    pytest==7.1.3
    # Pinning PyYAML for issue: https://github.com/yaml/pyyaml/issues/724
    PyYAML==5.3.1
    # Pinning docker for issue: https://github.com/docker/compose/issues/11309
    docker<7
    docker-compose==1.29.2
    requests==2.28.2

    ; core packages
    -e {toxinidir}/opentelemetry-api
    -e {toxinidir}/opentelemetry-semantic-conventions
    -e {toxinidir}/opentelemetry-sdk
    -e {toxinidir}/tests/opentelemetry-test-utils

    ; OTLP packages
    otlpexporter: -e {toxinidir}/opentelemetry-proto
    otlpexporter: -e {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common
    otlpexporter: -e {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc
    otlpexporter: -e {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http
    otlpexporter: -e {toxinidir}/exporter/opentelemetry-exporter-otlp

    opencensus: -e {toxinidir}/exporter/opentelemetry-exporter-opencensus

changedir = tests/opentelemetry-docker-tests/tests

commands_pre =
    pip freeze
    docker-compose up -d

commands =
    otlpexporter: pytest otlpexporter {posargs}
    opencensus: pytest opencensus {posargs}

commands_post =
    docker-compose down -v

[testenv:public-symbols-check]
recreate = True
deps =
    GitPython==3.1.40
    griffe==1.7.3
    toml

commands =
    ; griffe check before to fail fast if there are any issues
    python {toxinidir}/scripts/griffe_check.py
    python {toxinidir}/scripts/public_symbols_checker.py

[testenv:generate-workflows]
deps =
    tox
    Jinja2

commands =
    python {toxinidir}/.github/workflows/generate_workflows.py

[testenv:shellcheck]
commands_pre =
    sh -c "sudo apt update -y && sudo apt install --assume-yes shellcheck"

commands =
    sh -c "find {toxinidir} -name \*.sh | xargs shellcheck --severity=warning"

[testenv:typecheck]
basepython: python3
deps =
    -c {toxinidir}/dev-requirements.txt
    pyright
    psutil
    -e {toxinidir}/opentelemetry-api
    -e {toxinidir}/opentelemetry-semantic-conventions
    -e {toxinidir}/opentelemetry-sdk
    -e {toxinidir}/tests/opentelemetry-test-utils
    -e {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common
    -e {toxinidir}/exporter/opentelemetry-exporter-otlp
    -e {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc
    -e {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http
    -e {toxinidir}/opentelemetry-proto

commands =
    pyright --version
    pyright

[testenv:{precommit,ruff}]
basepython: python3
deps =
    -c {toxinidir}/dev-requirements.txt
    pre-commit

commands =
    pre-commit run --color=always --all-files {posargs}

python-opentelemetry-1.39.1/uv.lock000066400000000000000000003615371511654350100173420ustar00rootroot00000000000000version = 1 revision = 3 requires-python = ">=3.9" resolution-markers = [ "python_full_version >= '3.13'", "python_full_version ==
'3.12.*'", "python_full_version == '3.11.*'", "python_full_version == '3.10.*'", "python_full_version < '3.10'", "python_version < '0'", ] [manifest] members = [ "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-exporter-otlp-proto-common", "opentelemetry-exporter-otlp-proto-grpc", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-exporter-prometheus", "opentelemetry-exporter-zipkin-json", "opentelemetry-propagator-b3", "opentelemetry-propagator-jaeger", "opentelemetry-proto", "opentelemetry-python", "opentelemetry-sdk", "opentelemetry-semantic-conventions", "opentelemetry-test-utils", ] [[package]] name = "asgiref" version = "3.7.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/12/19/64e38c1c2cbf0da9635b7082bbdf0e89052e93329279f59759c24a10cc96/asgiref-3.7.2.tar.gz", hash = "sha256:9e0ce3aa93a819ba5b45120216b23878cf6e8525eb3848653452b4192b92afed", size = 33393, upload-time = "2023-05-27T17:21:42.12Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/9b/80/b9051a4a07ad231558fcd8ffc89232711b4e618c15cb7a392a17384bbeef/asgiref-3.7.2-py3-none-any.whl", hash = "sha256:89b2ef2247e3b562a16eef663bc0e2e703ec6468e2fa8a5cd61cd449786d4f6e", size = 24140, upload-time = "2023-05-27T17:21:40.454Z" }, ] [[package]] name = "cachetools" version = "6.2.1" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/cc/7e/b975b5814bd36faf009faebe22c1072a1fa1168db34d285ef0ba071ad78c/cachetools-6.2.1.tar.gz", hash = "sha256:3f391e4bd8f8bf0931169baf7456cc822705f4e2a31f840d218f445b9a854201", size = 31325, upload-time = "2025-10-12T14:55:30.139Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/96/c5/1e741d26306c42e2bf6ab740b2202872727e0f606033c9dd713f8b93f5a8/cachetools-6.2.1-py3-none-any.whl", hash = "sha256:09868944b6dde876dfd44e1d47e18484541eaf12f26f29b7af91b26cc892d701", size = 11280, upload-time = "2025-10-12T14:55:28.382Z" }, ] [[package]] name = "certifi" version = "2025.1.31" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577, upload-time = "2025-01-31T02:16:47.166Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393, upload-time = "2025-01-31T02:16:45.015Z" }, ] [[package]] name = "cfgv" version = "3.4.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114, upload-time = "2023-08-12T20:38:17.776Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249, upload-time = "2023-08-12T20:38:16.269Z" }, ] [[package]] name = "chardet" version = "5.2.0" source = { registry = 
"https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/f7b6ab21ec75897ed80c17d79b15951a719226b9fababf1e40ea74d69079/chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7", size = 2069618, upload-time = "2023-08-01T19:23:02.662Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/38/6f/f5fbc992a329ee4e0f288c1fe0e2ad9485ed064cac731ed2fe47dcc38cbf/chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970", size = 199385, upload-time = "2023-08-01T19:23:00.661Z" }, ] [[package]] name = "charset-normalizer" version = "3.4.1" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188, upload-time = "2024-12-24T18:12:35.43Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/0d/58/5580c1716040bc89206c77d8f74418caf82ce519aae06450393ca73475d1/charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de", size = 198013, upload-time = "2024-12-24T18:09:43.671Z" }, { url = "https://files.pythonhosted.org/packages/d0/11/00341177ae71c6f5159a08168bcb98c6e6d196d372c94511f9f6c9afe0c6/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176", size = 141285, upload-time = "2024-12-24T18:09:48.113Z" }, { url = "https://files.pythonhosted.org/packages/01/09/11d684ea5819e5a8f5100fb0b38cf8d02b514746607934134d31233e02c8/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037", size = 151449, upload-time = "2024-12-24T18:09:50.845Z" }, { url = "https://files.pythonhosted.org/packages/08/06/9f5a12939db324d905dc1f70591ae7d7898d030d7662f0d426e2286f68c9/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f", size = 143892, upload-time = "2024-12-24T18:09:52.078Z" }, { url = "https://files.pythonhosted.org/packages/93/62/5e89cdfe04584cb7f4d36003ffa2936681b03ecc0754f8e969c2becb7e24/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a", size = 146123, upload-time = "2024-12-24T18:09:54.575Z" }, { url = "https://files.pythonhosted.org/packages/a9/ac/ab729a15c516da2ab70a05f8722ecfccc3f04ed7a18e45c75bbbaa347d61/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a", size = 147943, upload-time = "2024-12-24T18:09:57.324Z" }, { url = "https://files.pythonhosted.org/packages/03/d2/3f392f23f042615689456e9a274640c1d2e5dd1d52de36ab8f7955f8f050/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247", size = 142063, upload-time = "2024-12-24T18:09:59.794Z" }, { url = 
"https://files.pythonhosted.org/packages/f2/e3/e20aae5e1039a2cd9b08d9205f52142329f887f8cf70da3650326670bddf/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408", size = 150578, upload-time = "2024-12-24T18:10:02.357Z" }, { url = "https://files.pythonhosted.org/packages/8d/af/779ad72a4da0aed925e1139d458adc486e61076d7ecdcc09e610ea8678db/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb", size = 153629, upload-time = "2024-12-24T18:10:03.678Z" }, { url = "https://files.pythonhosted.org/packages/c2/b6/7aa450b278e7aa92cf7732140bfd8be21f5f29d5bf334ae987c945276639/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d", size = 150778, upload-time = "2024-12-24T18:10:06.197Z" }, { url = "https://files.pythonhosted.org/packages/39/f4/d9f4f712d0951dcbfd42920d3db81b00dd23b6ab520419626f4023334056/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807", size = 146453, upload-time = "2024-12-24T18:10:08.848Z" }, { url = "https://files.pythonhosted.org/packages/49/2b/999d0314e4ee0cff3cb83e6bc9aeddd397eeed693edb4facb901eb8fbb69/charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f", size = 95479, upload-time = "2024-12-24T18:10:10.044Z" }, { url = "https://files.pythonhosted.org/packages/2d/ce/3cbed41cff67e455a386fb5e5dd8906cdda2ed92fbc6297921f2e4419309/charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f", size = 102790, upload-time = "2024-12-24T18:10:11.323Z" }, { url = "https://files.pythonhosted.org/packages/72/80/41ef5d5a7935d2d3a773e3eaebf0a9350542f2cab4eac59a7a4741fbbbbe/charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125", size = 194995, upload-time = "2024-12-24T18:10:12.838Z" }, { url = "https://files.pythonhosted.org/packages/7a/28/0b9fefa7b8b080ec492110af6d88aa3dea91c464b17d53474b6e9ba5d2c5/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1", size = 139471, upload-time = "2024-12-24T18:10:14.101Z" }, { url = "https://files.pythonhosted.org/packages/71/64/d24ab1a997efb06402e3fc07317e94da358e2585165930d9d59ad45fcae2/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3", size = 149831, upload-time = "2024-12-24T18:10:15.512Z" }, { url = "https://files.pythonhosted.org/packages/37/ed/be39e5258e198655240db5e19e0b11379163ad7070962d6b0c87ed2c4d39/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd", size = 142335, upload-time = "2024-12-24T18:10:18.369Z" }, { url = "https://files.pythonhosted.org/packages/88/83/489e9504711fa05d8dde1574996408026bdbdbd938f23be67deebb5eca92/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00", size = 143862, upload-time 
= "2024-12-24T18:10:19.743Z" }, { url = "https://files.pythonhosted.org/packages/c6/c7/32da20821cf387b759ad24627a9aca289d2822de929b8a41b6241767b461/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12", size = 145673, upload-time = "2024-12-24T18:10:21.139Z" }, { url = "https://files.pythonhosted.org/packages/68/85/f4288e96039abdd5aeb5c546fa20a37b50da71b5cf01e75e87f16cd43304/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77", size = 140211, upload-time = "2024-12-24T18:10:22.382Z" }, { url = "https://files.pythonhosted.org/packages/28/a3/a42e70d03cbdabc18997baf4f0227c73591a08041c149e710045c281f97b/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146", size = 148039, upload-time = "2024-12-24T18:10:24.802Z" }, { url = "https://files.pythonhosted.org/packages/85/e4/65699e8ab3014ecbe6f5c71d1a55d810fb716bbfd74f6283d5c2aa87febf/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd", size = 151939, upload-time = "2024-12-24T18:10:26.124Z" }, { url = "https://files.pythonhosted.org/packages/b1/82/8e9fe624cc5374193de6860aba3ea8070f584c8565ee77c168ec13274bd2/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6", size = 149075, upload-time = "2024-12-24T18:10:30.027Z" }, { url = "https://files.pythonhosted.org/packages/3d/7b/82865ba54c765560c8433f65e8acb9217cb839a9e32b42af4aa8e945870f/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8", size = 144340, upload-time = "2024-12-24T18:10:32.679Z" }, { url = "https://files.pythonhosted.org/packages/b5/b6/9674a4b7d4d99a0d2df9b215da766ee682718f88055751e1e5e753c82db0/charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b", size = 95205, upload-time = "2024-12-24T18:10:34.724Z" }, { url = "https://files.pythonhosted.org/packages/1e/ab/45b180e175de4402dcf7547e4fb617283bae54ce35c27930a6f35b6bef15/charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76", size = 102441, upload-time = "2024-12-24T18:10:37.574Z" }, { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105, upload-time = "2024-12-24T18:10:38.83Z" }, { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404, upload-time = "2024-12-24T18:10:44.272Z" }, { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423, upload-time = "2024-12-24T18:10:45.492Z" }, { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184, upload-time = "2024-12-24T18:10:47.898Z" }, { url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268, upload-time = "2024-12-24T18:10:50.589Z" }, { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601, upload-time = "2024-12-24T18:10:52.541Z" }, { url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098, upload-time = "2024-12-24T18:10:53.789Z" }, { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520, upload-time = "2024-12-24T18:10:55.048Z" }, { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852, upload-time = "2024-12-24T18:10:57.647Z" }, { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488, upload-time = "2024-12-24T18:10:59.43Z" }, { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192, upload-time = "2024-12-24T18:11:00.676Z" }, { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550, upload-time = "2024-12-24T18:11:01.952Z" }, { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785, upload-time = "2024-12-24T18:11:03.142Z" }, { url = 
"https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698, upload-time = "2024-12-24T18:11:05.834Z" }, { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162, upload-time = "2024-12-24T18:11:07.064Z" }, { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263, upload-time = "2024-12-24T18:11:08.374Z" }, { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966, upload-time = "2024-12-24T18:11:09.831Z" }, { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992, upload-time = "2024-12-24T18:11:12.03Z" }, { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162, upload-time = "2024-12-24T18:11:13.372Z" }, { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972, upload-time = "2024-12-24T18:11:14.628Z" }, { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095, upload-time = "2024-12-24T18:11:17.672Z" }, { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668, upload-time = "2024-12-24T18:11:18.989Z" }, { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073, upload-time = "2024-12-24T18:11:21.507Z" }, { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732, upload-time = "2024-12-24T18:11:22.774Z" }, { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391, upload-time = "2024-12-24T18:11:24.139Z" }, { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702, upload-time = "2024-12-24T18:11:26.535Z" }, { url = "https://files.pythonhosted.org/packages/7f/c0/b913f8f02836ed9ab32ea643c6fe4d3325c3d8627cf6e78098671cafff86/charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41", size = 197867, upload-time = "2024-12-24T18:12:10.438Z" }, { url = "https://files.pythonhosted.org/packages/0f/6c/2bee440303d705b6fb1e2ec789543edec83d32d258299b16eed28aad48e0/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f", size = 141385, upload-time = "2024-12-24T18:12:11.847Z" }, { url = "https://files.pythonhosted.org/packages/3d/04/cb42585f07f6f9fd3219ffb6f37d5a39b4fd2db2355b23683060029c35f7/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2", size = 151367, upload-time = "2024-12-24T18:12:13.177Z" }, { url = "https://files.pythonhosted.org/packages/54/54/2412a5b093acb17f0222de007cc129ec0e0df198b5ad2ce5699355269dfe/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770", size = 143928, upload-time = "2024-12-24T18:12:14.497Z" }, { url = "https://files.pythonhosted.org/packages/5a/6d/e2773862b043dcf8a221342954f375392bb2ce6487bcd9f2c1b34e1d6781/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4", size = 146203, upload-time = "2024-12-24T18:12:15.731Z" }, { url = "https://files.pythonhosted.org/packages/b9/f8/ca440ef60d8f8916022859885f231abb07ada3c347c03d63f283bec32ef5/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537", size = 148082, upload-time = "2024-12-24T18:12:18.641Z" }, { url = "https://files.pythonhosted.org/packages/04/d2/42fd330901aaa4b805a1097856c2edf5095e260a597f65def493f4b8c833/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496", size = 142053, upload-time = "2024-12-24T18:12:20.036Z" }, { url = "https://files.pythonhosted.org/packages/9e/af/3a97a4fa3c53586f1910dadfc916e9c4f35eeada36de4108f5096cb7215f/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78", size = 150625, upload-time = "2024-12-24T18:12:22.804Z" }, { url = 
"https://files.pythonhosted.org/packages/26/ae/23d6041322a3556e4da139663d02fb1b3c59a23ab2e2b56432bd2ad63ded/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7", size = 153549, upload-time = "2024-12-24T18:12:24.163Z" }, { url = "https://files.pythonhosted.org/packages/94/22/b8f2081c6a77cb20d97e57e0b385b481887aa08019d2459dc2858ed64871/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6", size = 150945, upload-time = "2024-12-24T18:12:25.415Z" }, { url = "https://files.pythonhosted.org/packages/c7/0b/c5ec5092747f801b8b093cdf5610e732b809d6cb11f4c51e35fc28d1d389/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294", size = 146595, upload-time = "2024-12-24T18:12:28.03Z" }, { url = "https://files.pythonhosted.org/packages/0c/5a/0b59704c38470df6768aa154cc87b1ac7c9bb687990a1559dc8765e8627e/charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5", size = 95453, upload-time = "2024-12-24T18:12:29.569Z" }, { url = "https://files.pythonhosted.org/packages/85/2d/a9790237cb4d01a6d57afadc8573c8b73c609ade20b80f4cda30802009ee/charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765", size = 102811, upload-time = "2024-12-24T18:12:30.83Z" }, { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767, upload-time = "2024-12-24T18:12:32.852Z" }, ] [[package]] name = "colorama" version = "0.4.6" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] [[package]] name = "distlib" version = "0.4.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/96/8e/709914eb2b5749865801041647dc7f4e6d00b549cfe88b65ca192995f07c/distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d", size = 614605, upload-time = "2025-07-17T16:52:00.465Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, ] [[package]] name = "filelock" version = "3.19.1" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version < '3.10'", ] sdist = { url = "https://files.pythonhosted.org/packages/40/bb/0ab3e58d22305b6f5440629d20683af28959bf793d98d11950e305c1c326/filelock-3.19.1.tar.gz", hash = 
"sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58", size = 17687, upload-time = "2025-08-14T16:56:03.016Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988, upload-time = "2025-08-14T16:56:01.633Z" }, ] [[package]] name = "filelock" version = "3.20.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.13'", "python_full_version == '3.12.*'", "python_full_version == '3.11.*'", "python_full_version == '3.10.*'", ] sdist = { url = "https://files.pythonhosted.org/packages/58/46/0028a82567109b5ef6e4d2a1f04a583fb513e6cf9527fcdd09afd817deeb/filelock-3.20.0.tar.gz", hash = "sha256:711e943b4ec6be42e1d4e6690b48dc175c822967466bb31c0c293f34334c13f4", size = 18922, upload-time = "2025-10-08T18:03:50.056Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/76/91/7216b27286936c16f5b4d0c530087e4a54eead683e6b0b73dd0c64844af6/filelock-3.20.0-py3-none-any.whl", hash = "sha256:339b4732ffda5cd79b13f4e2711a31b0365ce445d95d243bb996273d072546a2", size = 16054, upload-time = "2025-10-08T18:03:48.35Z" }, ] [[package]] name = "google-auth" version = "2.42.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cachetools" }, { name = "pyasn1-modules" }, { name = "rsa" }, ] sdist = { url = "https://files.pythonhosted.org/packages/25/6b/22a77135757c3a7854c9f008ffed6bf4e8851616d77faf13147e9ab5aae6/google_auth-2.42.1.tar.gz", hash = "sha256:30178b7a21aa50bffbdc1ffcb34ff770a2f65c712170ecd5446c4bef4dc2b94e", size = 295541 } wheels = [ { url = "https://files.pythonhosted.org/packages/92/05/adeb6c495aec4f9d93f9e2fc29eeef6e14d452bba11d15bdb874ce1d5b10/google_auth-2.42.1-py2.py3-none-any.whl", hash = "sha256:eb73d71c91fc95dbd221a2eb87477c278a355e7367a35c0d84e6b0e5f9b4ad11", size = 222550 }, ] [[package]] name = "googleapis-common-protos" version = "1.68.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "protobuf" }, ] sdist = { url = "https://files.pythonhosted.org/packages/54/d2/c08f0d9f94b45faca68e355771329cba2411c777c8713924dd1baee0e09c/googleapis_common_protos-1.68.0.tar.gz", hash = "sha256:95d38161f4f9af0d9423eed8fb7b64ffd2568c3464eb542ff02c5bfa1953ab3c", size = 57367, upload-time = "2025-02-20T19:08:28.426Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/3f/85/c99a157ee99d67cc6c9ad123abb8b1bfb476fab32d2f3511c59314548e4f/googleapis_common_protos-1.68.0-py2.py3-none-any.whl", hash = "sha256:aaf179b2f81df26dfadac95def3b16a95064c76a5f45f07e4c68a21bb371c4ac", size = 164985, upload-time = "2025-02-20T19:08:26.964Z" }, ] [[package]] name = "grpcio" version = "1.70.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/69/e1/4b21b5017c33f3600dcc32b802bb48fe44a4d36d6c066f52650c7c2690fa/grpcio-1.70.0.tar.gz", hash = "sha256:8d1584a68d5922330025881e63a6c1b54cc8117291d382e4fa69339b6d914c56", size = 12788932, upload-time = "2025-01-23T18:00:17.288Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/10/e9/f72408bac1f7b05b25e4df569b02d6b200c8e7857193aa9f1df7a3744add/grpcio-1.70.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:95469d1977429f45fe7df441f586521361e235982a0b39e33841549143ae2851", size = 5229736, upload-time = "2025-01-23T17:52:55.697Z" }, { url = 
"https://files.pythonhosted.org/packages/b3/17/e65139ea76dac7bcd8a3f17cbd37e3d1a070c44db3098d0be5e14c5bd6a1/grpcio-1.70.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:ed9718f17fbdb472e33b869c77a16d0b55e166b100ec57b016dc7de9c8d236bf", size = 11432751, upload-time = "2025-01-23T17:52:58.338Z" }, { url = "https://files.pythonhosted.org/packages/a0/12/42de6082b4ab14a59d30b2fc7786882fdaa75813a4a4f3d4a8c4acd6ed59/grpcio-1.70.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:374d014f29f9dfdb40510b041792e0e2828a1389281eb590df066e1cc2b404e5", size = 5711439, upload-time = "2025-01-23T17:53:21.438Z" }, { url = "https://files.pythonhosted.org/packages/34/f8/b5a19524d273cbd119274a387bb72d6fbb74578e13927a473bc34369f079/grpcio-1.70.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2af68a6f5c8f78d56c145161544ad0febbd7479524a59c16b3e25053f39c87f", size = 6330777, upload-time = "2025-01-23T17:53:23.655Z" }, { url = "https://files.pythonhosted.org/packages/1a/67/3d6c0ad786238aac7fa93b79246fc452978fbfe9e5f86f70da8e8a2797d0/grpcio-1.70.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7df14b2dcd1102a2ec32f621cc9fab6695effef516efbc6b063ad749867295", size = 5944639, upload-time = "2025-01-23T17:53:26.699Z" }, { url = "https://files.pythonhosted.org/packages/76/0d/d9f7cbc41c2743cf18236a29b6a582f41bd65572a7144d92b80bc1e68479/grpcio-1.70.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c78b339869f4dbf89881e0b6fbf376313e4f845a42840a7bdf42ee6caed4b11f", size = 6643543, upload-time = "2025-01-23T17:53:30.758Z" }, { url = "https://files.pythonhosted.org/packages/fc/24/bdd7e606b3400c14330e33a4698fa3a49e38a28c9e0a831441adbd3380d2/grpcio-1.70.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:58ad9ba575b39edef71f4798fdb5c7b6d02ad36d47949cd381d4392a5c9cbcd3", size = 6199897, upload-time = "2025-01-23T17:53:34.656Z" }, { url = "https://files.pythonhosted.org/packages/d1/33/8132eb370087960c82d01b89faeb28f3e58f5619ffe19889f57c58a19c18/grpcio-1.70.0-cp310-cp310-win32.whl", hash = "sha256:2b0d02e4b25a5c1f9b6c7745d4fa06efc9fd6a611af0fb38d3ba956786b95199", size = 3617513, upload-time = "2025-01-23T17:53:37.323Z" }, { url = "https://files.pythonhosted.org/packages/99/bc/0fce5cfc0ca969df66f5dca6cf8d2258abb88146bf9ab89d8cf48e970137/grpcio-1.70.0-cp310-cp310-win_amd64.whl", hash = "sha256:0de706c0a5bb9d841e353f6343a9defc9fc35ec61d6eb6111802f3aa9fef29e1", size = 4303342, upload-time = "2025-01-23T17:53:41.719Z" }, { url = "https://files.pythonhosted.org/packages/65/c4/1f67d23d6bcadd2fd61fb460e5969c52b3390b4a4e254b5e04a6d1009e5e/grpcio-1.70.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:17325b0be0c068f35770f944124e8839ea3185d6d54862800fc28cc2ffad205a", size = 5229017, upload-time = "2025-01-23T17:53:44.732Z" }, { url = "https://files.pythonhosted.org/packages/e4/bd/cc36811c582d663a740fb45edf9f99ddbd99a10b6ba38267dc925e1e193a/grpcio-1.70.0-cp311-cp311-macosx_10_14_universal2.whl", hash = "sha256:dbe41ad140df911e796d4463168e33ef80a24f5d21ef4d1e310553fcd2c4a386", size = 11472027, upload-time = "2025-01-23T17:53:50.417Z" }, { url = "https://files.pythonhosted.org/packages/7e/32/8538bb2ace5cd72da7126d1c9804bf80b4fe3be70e53e2d55675c24961a8/grpcio-1.70.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:5ea67c72101d687d44d9c56068328da39c9ccba634cabb336075fae2eab0d04b", size = 5707785, upload-time = "2025-01-23T17:53:54.511Z" }, { url = 
"https://files.pythonhosted.org/packages/ce/5c/a45f85f2a0dfe4a6429dee98717e0e8bd7bd3f604315493c39d9679ca065/grpcio-1.70.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb5277db254ab7586769e490b7b22f4ddab3876c490da0a1a9d7c695ccf0bf77", size = 6331599, upload-time = "2025-01-23T17:53:58.156Z" }, { url = "https://files.pythonhosted.org/packages/9f/e5/5316b239380b8b2ad30373eb5bb25d9fd36c0375e94a98a0a60ea357d254/grpcio-1.70.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7831a0fc1beeeb7759f737f5acd9fdcda520e955049512d68fda03d91186eea", size = 5940834, upload-time = "2025-01-23T17:54:00.404Z" }, { url = "https://files.pythonhosted.org/packages/05/33/dbf035bc6d167068b4a9f2929dfe0b03fb763f0f861ecb3bb1709a14cb65/grpcio-1.70.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:27cc75e22c5dba1fbaf5a66c778e36ca9b8ce850bf58a9db887754593080d839", size = 6641191, upload-time = "2025-01-23T17:54:02.916Z" }, { url = "https://files.pythonhosted.org/packages/4c/c4/684d877517e5bfd6232d79107e5a1151b835e9f99051faef51fed3359ec4/grpcio-1.70.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d63764963412e22f0491d0d32833d71087288f4e24cbcddbae82476bfa1d81fd", size = 6198744, upload-time = "2025-01-23T17:54:06.842Z" }, { url = "https://files.pythonhosted.org/packages/e9/43/92fe5eeaf340650a7020cfb037402c7b9209e7a0f3011ea1626402219034/grpcio-1.70.0-cp311-cp311-win32.whl", hash = "sha256:bb491125103c800ec209d84c9b51f1c60ea456038e4734688004f377cfacc113", size = 3617111, upload-time = "2025-01-23T17:54:10.329Z" }, { url = "https://files.pythonhosted.org/packages/55/15/b6cf2c9515c028aff9da6984761a3ab484a472b0dc6435fcd07ced42127d/grpcio-1.70.0-cp311-cp311-win_amd64.whl", hash = "sha256:d24035d49e026353eb042bf7b058fb831db3e06d52bee75c5f2f3ab453e71aca", size = 4304604, upload-time = "2025-01-23T17:54:12.844Z" }, { url = "https://files.pythonhosted.org/packages/4c/a4/ddbda79dd176211b518f0f3795af78b38727a31ad32bc149d6a7b910a731/grpcio-1.70.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:ef4c14508299b1406c32bdbb9fb7b47612ab979b04cf2b27686ea31882387cff", size = 5198135, upload-time = "2025-01-23T17:54:16.026Z" }, { url = "https://files.pythonhosted.org/packages/30/5c/60eb8a063ea4cb8d7670af8fac3f2033230fc4b75f62669d67c66ac4e4b0/grpcio-1.70.0-cp312-cp312-macosx_10_14_universal2.whl", hash = "sha256:aa47688a65643afd8b166928a1da6247d3f46a2784d301e48ca1cc394d2ffb40", size = 11447529, upload-time = "2025-01-23T17:54:18.568Z" }, { url = "https://files.pythonhosted.org/packages/fb/b9/1bf8ab66729f13b44e8f42c9de56417d3ee6ab2929591cfee78dce749b57/grpcio-1.70.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:880bfb43b1bb8905701b926274eafce5c70a105bc6b99e25f62e98ad59cb278e", size = 5664484, upload-time = "2025-01-23T17:54:22.169Z" }, { url = "https://files.pythonhosted.org/packages/d1/06/2f377d6906289bee066d96e9bdb91e5e96d605d173df9bb9856095cccb57/grpcio-1.70.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e654c4b17d07eab259d392e12b149c3a134ec52b11ecdc6a515b39aceeec898", size = 6303739, upload-time = "2025-01-23T17:54:25.612Z" }, { url = "https://files.pythonhosted.org/packages/ae/50/64c94cfc4db8d9ed07da71427a936b5a2bd2b27c66269b42fbda82c7c7a4/grpcio-1.70.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2394e3381071045a706ee2eeb6e08962dd87e8999b90ac15c55f56fa5a8c9597", size = 5910417, upload-time = "2025-01-23T17:54:28.336Z" }, { url = 
"https://files.pythonhosted.org/packages/53/89/8795dfc3db4389c15554eb1765e14cba8b4c88cc80ff828d02f5572965af/grpcio-1.70.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b3c76701428d2df01964bc6479422f20e62fcbc0a37d82ebd58050b86926ef8c", size = 6626797, upload-time = "2025-01-23T17:54:31.372Z" }, { url = "https://files.pythonhosted.org/packages/9c/b2/6a97ac91042a2c59d18244c479ee3894e7fb6f8c3a90619bb5a7757fa30c/grpcio-1.70.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac073fe1c4cd856ebcf49e9ed6240f4f84d7a4e6ee95baa5d66ea05d3dd0df7f", size = 6190055, upload-time = "2025-01-23T17:54:34.254Z" }, { url = "https://files.pythonhosted.org/packages/86/2b/28db55c8c4d156053a8c6f4683e559cd0a6636f55a860f87afba1ac49a51/grpcio-1.70.0-cp312-cp312-win32.whl", hash = "sha256:cd24d2d9d380fbbee7a5ac86afe9787813f285e684b0271599f95a51bce33528", size = 3600214, upload-time = "2025-01-23T17:54:36.631Z" }, { url = "https://files.pythonhosted.org/packages/17/c3/a7a225645a965029ed432e5b5e9ed959a574e62100afab553eef58be0e37/grpcio-1.70.0-cp312-cp312-win_amd64.whl", hash = "sha256:0495c86a55a04a874c7627fd33e5beaee771917d92c0e6d9d797628ac40e7655", size = 4292538, upload-time = "2025-01-23T17:54:38.845Z" }, { url = "https://files.pythonhosted.org/packages/68/38/66d0f32f88feaf7d83f8559cd87d899c970f91b1b8a8819b58226de0a496/grpcio-1.70.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa573896aeb7d7ce10b1fa425ba263e8dddd83d71530d1322fd3a16f31257b4a", size = 5199218, upload-time = "2025-01-23T17:54:40.964Z" }, { url = "https://files.pythonhosted.org/packages/c1/96/947df763a0b18efb5cc6c2ae348e56d97ca520dc5300c01617b234410173/grpcio-1.70.0-cp313-cp313-macosx_10_14_universal2.whl", hash = "sha256:d405b005018fd516c9ac529f4b4122342f60ec1cee181788249372524e6db429", size = 11445983, upload-time = "2025-01-23T17:54:43.527Z" }, { url = "https://files.pythonhosted.org/packages/fd/5b/f3d4b063e51b2454bedb828e41f3485800889a3609c49e60f2296cc8b8e5/grpcio-1.70.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f32090238b720eb585248654db8e3afc87b48d26ac423c8dde8334a232ff53c9", size = 5663954, upload-time = "2025-01-23T17:54:47.532Z" }, { url = "https://files.pythonhosted.org/packages/bd/0b/dab54365fcedf63e9f358c1431885478e77d6f190d65668936b12dd38057/grpcio-1.70.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfa089a734f24ee5f6880c83d043e4f46bf812fcea5181dcb3a572db1e79e01c", size = 6304323, upload-time = "2025-01-23T17:54:50.036Z" }, { url = "https://files.pythonhosted.org/packages/76/a8/8f965a7171ddd336ce32946e22954aa1bbc6f23f095e15dadaa70604ba20/grpcio-1.70.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f19375f0300b96c0117aca118d400e76fede6db6e91f3c34b7b035822e06c35f", size = 5910939, upload-time = "2025-01-23T17:54:52.455Z" }, { url = "https://files.pythonhosted.org/packages/1b/05/0bbf68be8b17d1ed6f178435a3c0c12e665a1e6054470a64ce3cb7896596/grpcio-1.70.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:7c73c42102e4a5ec76608d9b60227d917cea46dff4d11d372f64cbeb56d259d0", size = 6631405, upload-time = "2025-01-23T17:54:55.808Z" }, { url = "https://files.pythonhosted.org/packages/79/6a/5df64b6df405a1ed1482cb6c10044b06ec47fd28e87c2232dbcf435ecb33/grpcio-1.70.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:0a5c78d5198a1f0aa60006cd6eb1c912b4a1520b6a3968e677dbcba215fabb40", size = 6190982, upload-time = "2025-01-23T17:54:58.405Z" }, { url = 
"https://files.pythonhosted.org/packages/42/aa/aeaac87737e6d25d1048c53b8ec408c056d3ed0c922e7c5efad65384250c/grpcio-1.70.0-cp313-cp313-win32.whl", hash = "sha256:fe9dbd916df3b60e865258a8c72ac98f3ac9e2a9542dcb72b7a34d236242a5ce", size = 3598359, upload-time = "2025-01-23T17:55:00.671Z" }, { url = "https://files.pythonhosted.org/packages/1f/79/8edd2442d2de1431b4a3de84ef91c37002f12de0f9b577fb07b452989dbc/grpcio-1.70.0-cp313-cp313-win_amd64.whl", hash = "sha256:4119fed8abb7ff6c32e3d2255301e59c316c22d31ab812b3fbcbaf3d0d87cc68", size = 4293938, upload-time = "2025-01-23T17:55:02.821Z" }, { url = "https://files.pythonhosted.org/packages/9d/0e/64061c9746a2dd6e07cb0a0f3829f0a431344add77ec36397cc452541ff6/grpcio-1.70.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:4f1937f47c77392ccd555728f564a49128b6a197a05a5cd527b796d36f3387d0", size = 5231123, upload-time = "2025-01-23T17:55:34.09Z" }, { url = "https://files.pythonhosted.org/packages/72/9f/c93501d5f361aecee0146ab19300d5acb1c2747b00217c641f06fffbcd62/grpcio-1.70.0-cp39-cp39-macosx_10_14_universal2.whl", hash = "sha256:0cd430b9215a15c10b0e7d78f51e8a39d6cf2ea819fd635a7214fae600b1da27", size = 11467217, upload-time = "2025-01-23T17:55:37.042Z" }, { url = "https://files.pythonhosted.org/packages/0a/1a/980d115b701023450a304881bf3f6309f6fb15787f9b78d2728074f3bf86/grpcio-1.70.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:e27585831aa6b57b9250abaf147003e126cd3a6c6ca0c531a01996f31709bed1", size = 5710913, upload-time = "2025-01-23T17:55:40.998Z" }, { url = "https://files.pythonhosted.org/packages/a0/84/af420067029808f9790e98143b3dd0f943bebba434a4706755051a520c91/grpcio-1.70.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1af8e15b0f0fe0eac75195992a63df17579553b0c4af9f8362cc7cc99ccddf4", size = 6330947, upload-time = "2025-01-23T17:55:43.538Z" }, { url = "https://files.pythonhosted.org/packages/24/1c/e1f06a7d29a1fa5053dcaf5352a50f8e1f04855fd194a65422a9d685d375/grpcio-1.70.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbce24409beaee911c574a3d75d12ffb8c3e3dd1b813321b1d7a96bbcac46bf4", size = 5943913, upload-time = "2025-01-23T17:55:45.936Z" }, { url = "https://files.pythonhosted.org/packages/41/8f/de13838e4467519a50cd0693e98b0b2bcc81d656013c38a1dd7dcb801526/grpcio-1.70.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ff4a8112a79464919bb21c18e956c54add43ec9a4850e3949da54f61c241a4a6", size = 6643236, upload-time = "2025-01-23T17:55:48.9Z" }, { url = "https://files.pythonhosted.org/packages/ac/73/d68c745d34e43a80440da4f3d79fa02c56cb118c2a26ba949f3cfd8316d7/grpcio-1.70.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5413549fdf0b14046c545e19cfc4eb1e37e9e1ebba0ca390a8d4e9963cab44d2", size = 6199038, upload-time = "2025-01-23T17:55:58.854Z" }, { url = "https://files.pythonhosted.org/packages/7e/dd/991f100b8c31636b4bb2a941dbbf54dbcc55d69c722cfa038c3d017eaa0c/grpcio-1.70.0-cp39-cp39-win32.whl", hash = "sha256:b745d2c41b27650095e81dea7091668c040457483c9bdb5d0d9de8f8eb25e59f", size = 3617512, upload-time = "2025-01-23T17:56:01.326Z" }, { url = "https://files.pythonhosted.org/packages/4d/80/1aa2ba791207a13e314067209b48e1a0893ed8d1f43ef012e194aaa6c2de/grpcio-1.70.0-cp39-cp39-win_amd64.whl", hash = "sha256:a31d7e3b529c94e930a117b2175b2efd179d96eb3c7a21ccb0289a8ab05b645c", size = 4303506, upload-time = "2025-01-23T17:56:03.842Z" }, ] [[package]] name = "identify" version = "2.6.15" source = { registry = "https://pypi.org/simple" } sdist = { url = 
"https://files.pythonhosted.org/packages/ff/e7/685de97986c916a6d93b3876139e00eef26ad5bbbd61925d670ae8013449/identify-2.6.15.tar.gz", hash = "sha256:e4f4864b96c6557ef2a1e1c951771838f4edc9df3a72ec7118b338801b11c7bf", size = 99311, upload-time = "2025-10-02T17:43:40.631Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/0f/1c/e5fd8f973d4f375adb21565739498e2e9a1e54c858a97b9a8ccfdc81da9b/identify-2.6.15-py2.py3-none-any.whl", hash = "sha256:1181ef7608e00704db228516541eb83a88a9f94433a8c80bb9b5bd54b1d81757", size = 99183, upload-time = "2025-10-02T17:43:39.137Z" }, ] [[package]] name = "idna" version = "3.10" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, ] [[package]] name = "importlib-metadata" version = "8.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] sdist = { url = "https://files.pythonhosted.org/packages/33/08/c1395a292bb23fd03bdf572a1357c5a733d3eecbab877641ceacab23db6e/importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580", size = 55767, upload-time = "2025-01-20T22:21:30.429Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/79/9d/0fb148dc4d6fa4a7dd1d8378168d9b4cd8d4560a6fbf6f0121c5fc34eb68/importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e", size = 26971, upload-time = "2025-01-20T22:21:29.177Z" }, ] [[package]] name = "nodeenv" version = "1.9.1" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, ] [[package]] name = "opentelemetry-api" source = { editable = "opentelemetry-api" } dependencies = [ { name = "importlib-metadata" }, { name = "typing-extensions" }, ] [package.metadata] requires-dist = [ { name = "importlib-metadata", specifier = ">=6.0,<8.8.0" }, { name = "typing-extensions", specifier = ">=4.5.0" }, ] [[package]] name = "opentelemetry-exporter-credential-provider-gcp" version = "0.59b0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "google-auth" }, { name = "grpcio" }, { name = "requests" }, ] sdist = { url = "https://files.pythonhosted.org/packages/87/2c/e8ebd03eccaba7b1cf8aa83625f2d8a8db8842a6acdfbd73950190842957/opentelemetry_exporter_credential_provider_gcp-0.59b0.tar.gz", hash = "sha256:b951f71f7ab0f3bf97556ca3c6520bc9826e6be3a15f826a924b786b80563fa8", size = 7161 } wheels = [ { url = 
"https://files.pythonhosted.org/packages/30/30/44bc046033ead9affc529036ed03a94b40553cf98044216d893c6f149321/opentelemetry_exporter_credential_provider_gcp-0.59b0-py3-none-any.whl", hash = "sha256:034d46511e82d25c75d98fc04c61f78d6d05d06e587377850f51fbf0d9bb7ab1", size = 8340 }, ] [[package]] name = "opentelemetry-exporter-otlp" source = { editable = "exporter/opentelemetry-exporter-otlp" } dependencies = [ { name = "opentelemetry-exporter-otlp-proto-grpc" }, { name = "opentelemetry-exporter-otlp-proto-http" }, ] [package.metadata] requires-dist = [ { name = "opentelemetry-exporter-otlp-proto-grpc", editable = "exporter/opentelemetry-exporter-otlp-proto-grpc" }, { name = "opentelemetry-exporter-otlp-proto-http", editable = "exporter/opentelemetry-exporter-otlp-proto-http" }, ] [[package]] name = "opentelemetry-exporter-otlp-proto-common" source = { editable = "exporter/opentelemetry-exporter-otlp-proto-common" } dependencies = [ { name = "opentelemetry-proto" }, ] [package.metadata] requires-dist = [{ name = "opentelemetry-proto", editable = "opentelemetry-proto" }] [[package]] name = "opentelemetry-exporter-otlp-proto-grpc" source = { editable = "exporter/opentelemetry-exporter-otlp-proto-grpc" } dependencies = [ { name = "googleapis-common-protos" }, { name = "grpcio" }, { name = "opentelemetry-api" }, { name = "opentelemetry-exporter-otlp-proto-common" }, { name = "opentelemetry-proto" }, { name = "opentelemetry-sdk" }, { name = "typing-extensions" }, ] [package.optional-dependencies] gcp-auth = [ { name = "opentelemetry-exporter-credential-provider-gcp" }, ] [package.metadata] requires-dist = [ { name = "googleapis-common-protos", specifier = "~=1.57" }, { name = "grpcio", marker = "python_full_version < '3.13'", specifier = ">=1.63.2,<2.0.0" }, { name = "grpcio", marker = "python_full_version >= '3.13'", specifier = ">=1.66.2,<2.0.0" }, { name = "opentelemetry-api", editable = "opentelemetry-api" }, { name = "opentelemetry-exporter-credential-provider-gcp", marker = "extra == 'gcp-auth'", specifier = ">=0.59b0" }, { name = "opentelemetry-exporter-otlp-proto-common", editable = "exporter/opentelemetry-exporter-otlp-proto-common" }, { name = "opentelemetry-proto", editable = "opentelemetry-proto" }, { name = "opentelemetry-sdk", editable = "opentelemetry-sdk" }, { name = "typing-extensions", specifier = ">=4.6.0" }, ] provides-extras = ["gcp-auth"] [[package]] name = "opentelemetry-exporter-otlp-proto-http" source = { editable = "exporter/opentelemetry-exporter-otlp-proto-http" } dependencies = [ { name = "googleapis-common-protos" }, { name = "opentelemetry-api" }, { name = "opentelemetry-exporter-otlp-proto-common" }, { name = "opentelemetry-proto" }, { name = "opentelemetry-sdk" }, { name = "requests" }, { name = "typing-extensions" }, ] [package.optional-dependencies] gcp-auth = [ { name = "opentelemetry-exporter-credential-provider-gcp" }, ] [package.metadata] requires-dist = [ { name = "googleapis-common-protos", specifier = "~=1.52" }, { name = "opentelemetry-api", editable = "opentelemetry-api" }, { name = "opentelemetry-exporter-credential-provider-gcp", marker = "extra == 'gcp-auth'", specifier = ">=0.59b0" }, { name = "opentelemetry-exporter-otlp-proto-common", editable = "exporter/opentelemetry-exporter-otlp-proto-common" }, { name = "opentelemetry-proto", editable = "opentelemetry-proto" }, { name = "opentelemetry-sdk", editable = "opentelemetry-sdk" }, { name = "requests", specifier = "~=2.7" }, { name = "typing-extensions", specifier = ">=4.5.0" }, ] provides-extras = 
["gcp-auth"] [[package]] name = "opentelemetry-exporter-prometheus" source = { editable = "exporter/opentelemetry-exporter-prometheus" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-sdk" }, { name = "prometheus-client" }, ] [package.metadata] requires-dist = [ { name = "opentelemetry-api", editable = "opentelemetry-api" }, { name = "opentelemetry-sdk", editable = "opentelemetry-sdk" }, { name = "prometheus-client", specifier = ">=0.5.0,<1.0.0" }, ] [[package]] name = "opentelemetry-exporter-zipkin-json" source = { editable = "exporter/opentelemetry-exporter-zipkin-json" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-sdk" }, { name = "requests" }, ] [package.metadata] requires-dist = [ { name = "opentelemetry-api", editable = "opentelemetry-api" }, { name = "opentelemetry-sdk", editable = "opentelemetry-sdk" }, { name = "requests", specifier = "~=2.7" }, ] [[package]] name = "opentelemetry-propagator-b3" source = { editable = "propagator/opentelemetry-propagator-b3" } dependencies = [ { name = "opentelemetry-api" }, { name = "typing-extensions" }, ] [package.metadata] requires-dist = [ { name = "opentelemetry-api", editable = "opentelemetry-api" }, { name = "typing-extensions", specifier = ">=4.5.0" }, ] [[package]] name = "opentelemetry-propagator-jaeger" source = { editable = "propagator/opentelemetry-propagator-jaeger" } dependencies = [ { name = "opentelemetry-api" }, ] [package.metadata] requires-dist = [{ name = "opentelemetry-api", editable = "opentelemetry-api" }] [[package]] name = "opentelemetry-proto" source = { editable = "opentelemetry-proto" } dependencies = [ { name = "protobuf" }, ] [package.metadata] requires-dist = [{ name = "protobuf", specifier = ">=5.0,<7.0" }] [[package]] name = "opentelemetry-python" version = "0.0.0" source = { virtual = "." 
} dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-exporter-otlp-proto-common" }, { name = "opentelemetry-exporter-otlp-proto-grpc" }, { name = "opentelemetry-exporter-otlp-proto-http" }, { name = "opentelemetry-exporter-prometheus" }, { name = "opentelemetry-exporter-zipkin-json" }, { name = "opentelemetry-propagator-b3" }, { name = "opentelemetry-propagator-jaeger" }, { name = "opentelemetry-proto" }, { name = "opentelemetry-sdk" }, { name = "opentelemetry-semantic-conventions" }, { name = "opentelemetry-test-utils" }, ] [package.dev-dependencies] dev = [ { name = "pre-commit" }, { name = "tox", version = "4.27.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "tox", version = "4.32.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "tox-uv", version = "1.28.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "tox-uv", version = "1.29.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, ] [package.metadata] requires-dist = [ { name = "opentelemetry-api", editable = "opentelemetry-api" }, { name = "opentelemetry-exporter-otlp-proto-common", editable = "exporter/opentelemetry-exporter-otlp-proto-common" }, { name = "opentelemetry-exporter-otlp-proto-grpc", editable = "exporter/opentelemetry-exporter-otlp-proto-grpc" }, { name = "opentelemetry-exporter-otlp-proto-http", editable = "exporter/opentelemetry-exporter-otlp-proto-http" }, { name = "opentelemetry-exporter-prometheus", editable = "exporter/opentelemetry-exporter-prometheus" }, { name = "opentelemetry-exporter-zipkin-json", editable = "exporter/opentelemetry-exporter-zipkin-json" }, { name = "opentelemetry-propagator-b3", editable = "propagator/opentelemetry-propagator-b3" }, { name = "opentelemetry-propagator-jaeger", editable = "propagator/opentelemetry-propagator-jaeger" }, { name = "opentelemetry-proto", editable = "opentelemetry-proto" }, { name = "opentelemetry-sdk", editable = "opentelemetry-sdk" }, { name = "opentelemetry-semantic-conventions", editable = "opentelemetry-semantic-conventions" }, { name = "opentelemetry-test-utils", editable = "tests/opentelemetry-test-utils" }, ] [package.metadata.requires-dev] dev = [ { name = "pre-commit" }, { name = "tox" }, { name = "tox-uv", specifier = ">=1" }, ] [[package]] name = "opentelemetry-sdk" source = { editable = "opentelemetry-sdk" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-semantic-conventions" }, { name = "typing-extensions" }, ] [package.metadata] requires-dist = [ { name = "opentelemetry-api", editable = "opentelemetry-api" }, { name = "opentelemetry-semantic-conventions", editable = "opentelemetry-semantic-conventions" }, { name = "typing-extensions", specifier = ">=4.5.0" }, ] [[package]] name = "opentelemetry-semantic-conventions" source = { editable = "opentelemetry-semantic-conventions" } dependencies = [ { name = "opentelemetry-api" }, { name = "typing-extensions" }, ] [package.metadata] requires-dist = [ { name = "opentelemetry-api", editable = "opentelemetry-api" }, { name = "typing-extensions", specifier = ">=4.5.0" }, ] [[package]] name = "opentelemetry-test-utils" source = { editable = "tests/opentelemetry-test-utils" } dependencies = [ { name = "asgiref" }, { name = "opentelemetry-api" }, { name = "opentelemetry-sdk" }, ] [package.metadata] requires-dist = [ { name = "asgiref", specifier = 
"~=3.0" }, { name = "opentelemetry-api", editable = "opentelemetry-api" }, { name = "opentelemetry-sdk", editable = "opentelemetry-sdk" }, ] [[package]] name = "packaging" version = "25.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] [[package]] name = "platformdirs" version = "4.4.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version < '3.10'", ] sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/40/4b/2028861e724d3bd36227adfa20d3fd24c3fc6d52032f4a93c133be5d17ce/platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", size = 18654, upload-time = "2025-08-26T14:32:02.735Z" }, ] [[package]] name = "platformdirs" version = "4.5.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.13'", "python_full_version == '3.12.*'", "python_full_version == '3.11.*'", "python_full_version == '3.10.*'", ] sdist = { url = "https://files.pythonhosted.org/packages/61/33/9611380c2bdb1225fdef633e2a9610622310fed35ab11dac9620972ee088/platformdirs-4.5.0.tar.gz", hash = "sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312", size = 21632, upload-time = "2025-10-08T17:44:48.791Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/73/cb/ac7874b3e5d58441674fb70742e6c374b28b0c7cb988d37d991cde47166c/platformdirs-4.5.0-py3-none-any.whl", hash = "sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3", size = 18651, upload-time = "2025-10-08T17:44:47.223Z" }, ] [[package]] name = "pluggy" version = "1.6.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, ] [[package]] name = "pre-commit" version = "4.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cfgv" }, { name = "identify" }, { name = "nodeenv" }, { name = "pyyaml" }, { name = "virtualenv", version = "20.33.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "virtualenv", version = "20.35.4", source = { registry = 
"https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/ff/29/7cf5bbc236333876e4b41f56e06857a87937ce4bf91e117a6991a2dbb02a/pre_commit-4.3.0.tar.gz", hash = "sha256:499fe450cc9d42e9d58e606262795ecb64dd05438943c62b66f6a8673da30b16", size = 193792, upload-time = "2025-08-09T18:56:14.651Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/5b/a5/987a405322d78a73b66e39e4a90e4ef156fd7141bf71df987e50717c321b/pre_commit-4.3.0-py2.py3-none-any.whl", hash = "sha256:2b0747ad7e6e967169136edffee14c16e148a778a54e4f967921aa1ebf2308d8", size = 220965, upload-time = "2025-08-09T18:56:13.192Z" }, ] [[package]] name = "prometheus-client" version = "0.21.1" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/62/14/7d0f567991f3a9af8d1cd4f619040c93b68f09a02b6d0b6ab1b2d1ded5fe/prometheus_client-0.21.1.tar.gz", hash = "sha256:252505a722ac04b0456be05c05f75f45d760c2911ffc45f2a06bcaed9f3ae3fb", size = 78551, upload-time = "2024-12-03T14:59:12.164Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/ff/c2/ab7d37426c179ceb9aeb109a85cda8948bb269b7561a0be870cc656eefe4/prometheus_client-0.21.1-py3-none-any.whl", hash = "sha256:594b45c410d6f4f8888940fe80b5cc2521b305a1fafe1c58609ef715a001f301", size = 54682, upload-time = "2024-12-03T14:59:10.935Z" }, ] [[package]] name = "protobuf" version = "5.29.3" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/f7/d1/e0a911544ca9993e0f17ce6d3cc0932752356c1b0a834397f28e63479344/protobuf-5.29.3.tar.gz", hash = "sha256:5da0f41edaf117bde316404bad1a486cb4ededf8e4a54891296f648e8e076620", size = 424945, upload-time = "2025-01-08T21:38:51.572Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/dc/7a/1e38f3cafa022f477ca0f57a1f49962f21ad25850c3ca0acd3b9d0091518/protobuf-5.29.3-cp310-abi3-win32.whl", hash = "sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888", size = 422708, upload-time = "2025-01-08T21:38:31.799Z" }, { url = "https://files.pythonhosted.org/packages/61/fa/aae8e10512b83de633f2646506a6d835b151edf4b30d18d73afd01447253/protobuf-5.29.3-cp310-abi3-win_amd64.whl", hash = "sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a", size = 434508, upload-time = "2025-01-08T21:38:35.489Z" }, { url = "https://files.pythonhosted.org/packages/dd/04/3eaedc2ba17a088961d0e3bd396eac764450f431621b58a04ce898acd126/protobuf-5.29.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8434404bbf139aa9e1300dbf989667a83d42ddda9153d8ab76e0d5dcaca484e", size = 417825, upload-time = "2025-01-08T21:38:36.642Z" }, { url = "https://files.pythonhosted.org/packages/4f/06/7c467744d23c3979ce250397e26d8ad8eeb2bea7b18ca12ad58313c1b8d5/protobuf-5.29.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:daaf63f70f25e8689c072cfad4334ca0ac1d1e05a92fc15c54eb9cf23c3efd84", size = 319573, upload-time = "2025-01-08T21:38:37.896Z" }, { url = "https://files.pythonhosted.org/packages/a8/45/2ebbde52ad2be18d3675b6bee50e68cd73c9e0654de77d595540b5129df8/protobuf-5.29.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:c027e08a08be10b67c06bf2370b99c811c466398c357e615ca88c91c07f0910f", size = 319672, upload-time = "2025-01-08T21:38:40.204Z" }, { url = "https://files.pythonhosted.org/packages/85/a6/bf65a38f8be5ab8c3b575822acfd338702fdf7ac9abd8c81630cc7c9f4bd/protobuf-5.29.3-cp39-cp39-win32.whl", hash = 
"sha256:0eb32bfa5219fc8d4111803e9a690658aa2e6366384fd0851064b963b6d1f2a7", size = 422676, upload-time = "2025-01-08T21:38:46.611Z" }, { url = "https://files.pythonhosted.org/packages/ac/e2/48d46adc86369ff092eaece3e537f76b3baaab45ca3dde257838cde831d2/protobuf-5.29.3-cp39-cp39-win_amd64.whl", hash = "sha256:6ce8cc3389a20693bfde6c6562e03474c40851b44975c9b2bf6df7d8c4f864da", size = 434593, upload-time = "2025-01-08T21:38:49.108Z" }, { url = "https://files.pythonhosted.org/packages/fd/b2/ab07b09e0f6d143dfb839693aa05765257bceaa13d03bf1a696b78323e7a/protobuf-5.29.3-py3-none-any.whl", hash = "sha256:0a18ed4a24198528f2333802eb075e59dea9d679ab7a6c5efb017a59004d849f", size = 172550, upload-time = "2025-01-08T21:38:50.439Z" }, ] [[package]] name = "pyproject-api" version = "1.9.1" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version < '3.10'", ] dependencies = [ { name = "packaging", marker = "python_full_version < '3.10'" }, { name = "tomli", marker = "python_full_version < '3.10'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/19/fd/437901c891f58a7b9096511750247535e891d2d5a5a6eefbc9386a2b41d5/pyproject_api-1.9.1.tar.gz", hash = "sha256:43c9918f49daab37e302038fc1aed54a8c7a91a9fa935d00b9a485f37e0f5335", size = 22710, upload-time = "2025-05-12T14:41:58.025Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/ef/e6/c293c06695d4a3ab0260ef124a74ebadba5f4c511ce3a4259e976902c00b/pyproject_api-1.9.1-py3-none-any.whl", hash = "sha256:7d6238d92f8962773dd75b5f0c4a6a27cce092a14b623b811dba656f3b628948", size = 13158, upload-time = "2025-05-12T14:41:56.217Z" }, ] [[package]] name = "pyproject-api" version = "1.10.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.13'", "python_full_version == '3.12.*'", "python_full_version == '3.11.*'", "python_full_version == '3.10.*'", ] dependencies = [ { name = "packaging", marker = "python_full_version >= '3.10'" }, { name = "tomli", marker = "python_full_version == '3.10.*'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/45/7b/c0e1333b61d41c69e59e5366e727b18c4992688caf0de1be10b3e5265f6b/pyproject_api-1.10.0.tar.gz", hash = "sha256:40c6f2d82eebdc4afee61c773ed208c04c19db4c4a60d97f8d7be3ebc0bbb330", size = 22785, upload-time = "2025-10-09T19:12:27.21Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/54/cc/cecf97be298bee2b2a37dd360618c819a2a7fd95251d8e480c1f0eb88f3b/pyproject_api-1.10.0-py3-none-any.whl", hash = "sha256:8757c41a79c0f4ab71b99abed52b97ecf66bd20b04fa59da43b5840bac105a09", size = 13218, upload-time = "2025-10-09T19:12:24.428Z" }, ] [[package]] name = "pyyaml" version = "6.0.3" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/f4/a0/39350dd17dd6d6c6507025c0e53aef67a9293a6d37d3511f23ea510d5800/pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b", size = 184227, upload-time = "2025-09-25T21:31:46.04Z" }, { url = "https://files.pythonhosted.org/packages/05/14/52d505b5c59ce73244f59c7a50ecf47093ce4765f116cdb98286a71eeca2/pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956", size = 174019, upload-time = "2025-09-25T21:31:47.706Z" }, { url = "https://files.pythonhosted.org/packages/43/f7/0e6a5ae5599c838c696adb4e6330a59f463265bfa1e116cfd1fbb0abaaae/pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8", size = 740646, upload-time = "2025-09-25T21:31:49.21Z" }, { url = "https://files.pythonhosted.org/packages/2f/3a/61b9db1d28f00f8fd0ae760459a5c4bf1b941baf714e207b6eb0657d2578/pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198", size = 840793, upload-time = "2025-09-25T21:31:50.735Z" }, { url = "https://files.pythonhosted.org/packages/7a/1e/7acc4f0e74c4b3d9531e24739e0ab832a5edf40e64fbae1a9c01941cabd7/pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b", size = 770293, upload-time = "2025-09-25T21:31:51.828Z" }, { url = "https://files.pythonhosted.org/packages/8b/ef/abd085f06853af0cd59fa5f913d61a8eab65d7639ff2a658d18a25d6a89d/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0", size = 732872, upload-time = "2025-09-25T21:31:53.282Z" }, { url = "https://files.pythonhosted.org/packages/1f/15/2bc9c8faf6450a8b3c9fc5448ed869c599c0a74ba2669772b1f3a0040180/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69", size = 758828, upload-time = "2025-09-25T21:31:54.807Z" }, { url = "https://files.pythonhosted.org/packages/a3/00/531e92e88c00f4333ce359e50c19b8d1de9fe8d581b1534e35ccfbc5f393/pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e", size = 142415, upload-time = "2025-09-25T21:31:55.885Z" }, { url = "https://files.pythonhosted.org/packages/2a/fa/926c003379b19fca39dd4634818b00dec6c62d87faf628d1394e137354d4/pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c", size = 158561, upload-time = "2025-09-25T21:31:57.406Z" }, { url = "https://files.pythonhosted.org/packages/6d/16/a95b6757765b7b031c9374925bb718d55e0a9ba8a1b6a12d25962ea44347/pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e", size = 185826, upload-time = "2025-09-25T21:31:58.655Z" }, { url = "https://files.pythonhosted.org/packages/16/19/13de8e4377ed53079ee996e1ab0a9c33ec2faf808a4647b7b4c0d46dd239/pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824", size = 175577, upload-time = "2025-09-25T21:32:00.088Z" }, { url = "https://files.pythonhosted.org/packages/0c/62/d2eb46264d4b157dae1275b573017abec435397aa59cbcdab6fc978a8af4/pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c", size = 775556, upload-time = "2025-09-25T21:32:01.31Z" }, { url = 
"https://files.pythonhosted.org/packages/10/cb/16c3f2cf3266edd25aaa00d6c4350381c8b012ed6f5276675b9eba8d9ff4/pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00", size = 882114, upload-time = "2025-09-25T21:32:03.376Z" }, { url = "https://files.pythonhosted.org/packages/71/60/917329f640924b18ff085ab889a11c763e0b573da888e8404ff486657602/pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d", size = 806638, upload-time = "2025-09-25T21:32:04.553Z" }, { url = "https://files.pythonhosted.org/packages/dd/6f/529b0f316a9fd167281a6c3826b5583e6192dba792dd55e3203d3f8e655a/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a", size = 767463, upload-time = "2025-09-25T21:32:06.152Z" }, { url = "https://files.pythonhosted.org/packages/f2/6a/b627b4e0c1dd03718543519ffb2f1deea4a1e6d42fbab8021936a4d22589/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4", size = 794986, upload-time = "2025-09-25T21:32:07.367Z" }, { url = "https://files.pythonhosted.org/packages/45/91/47a6e1c42d9ee337c4839208f30d9f09caa9f720ec7582917b264defc875/pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b", size = 142543, upload-time = "2025-09-25T21:32:08.95Z" }, { url = "https://files.pythonhosted.org/packages/da/e3/ea007450a105ae919a72393cb06f122f288ef60bba2dc64b26e2646fa315/pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf", size = 158763, upload-time = "2025-09-25T21:32:09.96Z" }, { url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" }, { url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" }, { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" }, { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" }, { url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = 
"2025-09-25T21:32:16.431Z" }, { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" }, { url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" }, { url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" }, { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" }, { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" }, { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, { url = 
"https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, { url = 
"https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, { url = 
"https://files.pythonhosted.org/packages/9f/62/67fc8e68a75f738c9200422bf65693fb79a4cd0dc5b23310e5202e978090/pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da", size = 184450, upload-time = "2025-09-25T21:33:00.618Z" }, { url = "https://files.pythonhosted.org/packages/ae/92/861f152ce87c452b11b9d0977952259aa7df792d71c1053365cc7b09cc08/pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917", size = 174319, upload-time = "2025-09-25T21:33:02.086Z" }, { url = "https://files.pythonhosted.org/packages/d0/cd/f0cfc8c74f8a030017a2b9c771b7f47e5dd702c3e28e5b2071374bda2948/pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9", size = 737631, upload-time = "2025-09-25T21:33:03.25Z" }, { url = "https://files.pythonhosted.org/packages/ef/b2/18f2bd28cd2055a79a46c9b0895c0b3d987ce40ee471cecf58a1a0199805/pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5", size = 836795, upload-time = "2025-09-25T21:33:05.014Z" }, { url = "https://files.pythonhosted.org/packages/73/b9/793686b2d54b531203c160ef12bec60228a0109c79bae6c1277961026770/pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a", size = 750767, upload-time = "2025-09-25T21:33:06.398Z" }, { url = "https://files.pythonhosted.org/packages/a9/86/a137b39a611def2ed78b0e66ce2fe13ee701a07c07aebe55c340ed2a050e/pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926", size = 727982, upload-time = "2025-09-25T21:33:08.708Z" }, { url = "https://files.pythonhosted.org/packages/dd/62/71c27c94f457cf4418ef8ccc71735324c549f7e3ea9d34aba50874563561/pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7", size = 755677, upload-time = "2025-09-25T21:33:09.876Z" }, { url = "https://files.pythonhosted.org/packages/29/3d/6f5e0d58bd924fb0d06c3a6bad00effbdae2de5adb5cda5648006ffbd8d3/pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0", size = 142592, upload-time = "2025-09-25T21:33:10.983Z" }, { url = "https://files.pythonhosted.org/packages/f0/0c/25113e0b5e103d7f1490c0e947e303fe4a696c10b501dea7a9f49d4e876c/pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007", size = 158777, upload-time = "2025-09-25T21:33:15.55Z" }, ] [[package]] name = "pyasn1" version = "0.6.1" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322 } wheels = [ { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135 }, ] [[package]] name = "pyasn1-modules" version = "0.4.2" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "pyasn1" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892 } wheels = [ { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259 }, ] [[package]] name = "requests" version = "2.32.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, { name = "charset-normalizer" }, { name = "idna" }, { name = "urllib3" }, ] sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218, upload-time = "2024-05-29T15:37:49.536Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928, upload-time = "2024-05-29T15:37:47.027Z" }, ] [[package]] name = "tomli" version = "2.3.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, { url = "https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, { url = 
"https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, { url = "https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, { url = 
"https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, { url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, { url = 
"https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, { url = "https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, { url = "https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, { url = "https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, { url = 
"https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, ] [[package]] name = "tox" version = "4.27.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version == '3.10.*'", "python_full_version < '3.10'", ] dependencies = [ { name = "cachetools", marker = "python_full_version < '3.11'" }, { name = "chardet", marker = "python_full_version < '3.11'" }, { name = "colorama", marker = "python_full_version < '3.11'" }, { name = "filelock", version = "3.19.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "filelock", version = "3.20.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, { name = "packaging", marker = "python_full_version < '3.11'" }, { name = "platformdirs", version = "4.4.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "platformdirs", version = "4.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, { name = "pluggy", marker = "python_full_version < '3.11'" }, { name = "pyproject-api", version = "1.9.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "pyproject-api", version = "1.10.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions", marker = "python_full_version < '3.11'" }, { name = "virtualenv", version = "20.33.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a5/b7/19c01717747076f63c54d871ada081cd711a7c9a7572f2225675c3858b94/tox-4.27.0.tar.gz", hash = "sha256:b97d5ecc0c0d5755bcc5348387fef793e1bfa68eb33746412f4c60881d7f5f57", size = 198351, upload-time = "2025-06-17T15:17:50.585Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/c1/3a/30889167f41ecaffb957ec4409e1cbc1d5d558a5bbbdfb734a5b9911930f/tox-4.27.0-py3-none-any.whl", hash = "sha256:2b8a7fb986b82aa2c830c0615082a490d134e0626dbc9189986da46a313c4f20", size = 173441, upload-time = "2025-06-17T15:17:48.689Z" }, ] [[package]] name = "tox" version = "4.32.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.13'", "python_full_version == '3.12.*'", "python_full_version == '3.11.*'", ] dependencies = [ { name = "cachetools", marker = "python_full_version >= '3.11'" }, { name = "chardet", marker = "python_full_version >= '3.11'" }, { name = "colorama", marker = "python_full_version >= '3.11'" }, { name = "filelock", version = 
"3.20.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "packaging", marker = "python_full_version >= '3.11'" }, { name = "platformdirs", version = "4.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "pluggy", marker = "python_full_version >= '3.11'" }, { name = "pyproject-api", version = "1.10.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "virtualenv", version = "20.35.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/59/bf/0e4dbd42724cbae25959f0e34c95d0c730df03ab03f54d52accd9abfc614/tox-4.32.0.tar.gz", hash = "sha256:1ad476b5f4d3679455b89a992849ffc3367560bbc7e9495ee8a3963542e7c8ff", size = 203330, upload-time = "2025-10-24T18:03:38.132Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/fc/cc/e09c0d663a004945f82beecd4f147053567910479314e8d01ba71e5d5dea/tox-4.32.0-py3-none-any.whl", hash = "sha256:451e81dc02ba8d1ed20efd52ee409641ae4b5d5830e008af10fe8823ef1bd551", size = 175905, upload-time = "2025-10-24T18:03:36.337Z" }, ] [[package]] name = "tox-uv" version = "1.28.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version == '3.10.*'", "python_full_version < '3.10'", ] dependencies = [ { name = "packaging", marker = "python_full_version < '3.11'" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "tox", version = "4.27.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "typing-extensions", marker = "python_full_version < '3.10'" }, { name = "uv", marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/f9/9a/f4b675ebcbd623854129891e87045f80c1d8e91b2957496f1fe6e463f291/tox_uv-1.28.0.tar.gz", hash = "sha256:a06ff909f73232b2b7965de19090d887b12b44e44eb0843b2c07266d2957ade2", size = 23265, upload-time = "2025-08-14T17:53:07.909Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/1f/ac/b32555d190c4440b8d2779d4a19439e5fbd5a3950f7e5a17ead7c7d30cad/tox_uv-1.28.0-py3-none-any.whl", hash = "sha256:3fbe13fa6eb6961df5512e63fc4a5cc0c8d264872674ee09164649f441839053", size = 17225, upload-time = "2025-08-14T17:53:06.299Z" }, ] [[package]] name = "tox-uv" version = "1.29.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.13'", "python_full_version == '3.12.*'", "python_full_version == '3.11.*'", ] dependencies = [ { name = "packaging", marker = "python_full_version >= '3.11'" }, { name = "tox", version = "4.32.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "uv", marker = "python_full_version >= '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/4f/90/06752775b8cfadba8856190f5beae9f552547e0f287e0246677972107375/tox_uv-1.29.0.tar.gz", hash = "sha256:30fa9e6ad507df49d3c6a2f88894256bcf90f18e240a00764da6ecab1db24895", size = 23427, upload-time = "2025-10-09T20:40:27.384Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/5c/17/221d62937c4130b044bb437caac4181e7e13d5536bbede65264db1f0ac9f/tox_uv-1.29.0-py3-none-any.whl", hash = "sha256:b1d251286edeeb4bc4af1e24c8acfdd9404700143c2199ccdbb4ea195f7de6cc", size = 17254, upload-time = "2025-10-09T20:40:25.885Z" }, ] [[package]] name = "rsa" version = "4.9.1" 
source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pyasn1" }, ] sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034 } wheels = [ { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696 }, ] [[package]] name = "typing-extensions" version = "4.12.2" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321, upload-time = "2024-06-07T18:52:15.995Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" }, ] [[package]] name = "urllib3" version = "2.3.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268, upload-time = "2024-12-22T07:47:30.032Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369, upload-time = "2024-12-22T07:47:28.074Z" }, ] [[package]] name = "uv" version = "0.9.7" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/cc/f6/9914f57d152cfcb85f3a26f8fbac3c88e4eb9cbe88639076241e16819334/uv-0.9.7.tar.gz", hash = "sha256:555ee72146b8782c73d755e4a21c9885c6bfc81db0ffca2220d52dddae007eb7", size = 3705596, upload-time = "2025-10-30T22:17:18.652Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/58/38/cee64a9dcefd46f83a922c4e31d9cd9d91ce0d27a594192f7df677151eb4/uv-0.9.7-py3-none-linux_armv6l.whl", hash = "sha256:134e0daac56f9e399ccdfc9e4635bc0a13c234cad9224994c67bae462e07399a", size = 20614967, upload-time = "2025-10-30T22:16:31.274Z" }, { url = "https://files.pythonhosted.org/packages/6f/b7/1b1ff8dfde05e9d27abf29ebf22da48428fe1e16f0b4d65a839bd2211303/uv-0.9.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:1aaf79b4234400e9e2fbf5b50b091726ccbb0b6d4d032edd3dfd4c9673d89dca", size = 19692886, upload-time = "2025-10-30T22:16:35.893Z" }, { url = "https://files.pythonhosted.org/packages/f5/7d/b618174d8a8216af350398ace03805b2b2df6267b1745abf45556c2fda58/uv-0.9.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:0fdbfad5b367e7a3968264af6da5bbfffd4944a90319042f166e8df1a2d9de09", size = 18345022, upload-time = "2025-10-30T22:16:38.45Z" }, { url = "https://files.pythonhosted.org/packages/13/4c/03fafb7d28289d54ac7a34507f1e97e527971f8b0ee2c5e957045966a1a6/uv-0.9.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = 
"sha256:635e82c2d0d8b001618af82e4f2724350f15814f6462a71b3ebd44adec21f03c", size = 20170427, upload-time = "2025-10-30T22:16:41.099Z" }, { url = "https://files.pythonhosted.org/packages/35/0e/f1316da150453755bb88cf4232e8934de71a0091eb274a8b69d948535453/uv-0.9.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:56a440ccde7624a7bc070e1c2492b358c67aea9b8f17bc243ea27c5871c8d02c", size = 20234277, upload-time = "2025-10-30T22:16:43.521Z" }, { url = "https://files.pythonhosted.org/packages/37/b8/cb62cd78151b235c5da9290f0e3fb032b36706f2922208a691678aa0f2df/uv-0.9.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b5f1fb8203a77853db176000e8f30d5815ab175dc46199db059f97a72fc51110", size = 21180078, upload-time = "2025-10-30T22:16:45.857Z" }, { url = "https://files.pythonhosted.org/packages/be/e5/6107249d23f06fa1739496e89699e76169037b4643144b28b324efc3075d/uv-0.9.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:bb8bfcc2897f7653522abc2cae80233af756ad857bfbbbbe176f79460cbba417", size = 22743896, upload-time = "2025-10-30T22:16:48.487Z" }, { url = "https://files.pythonhosted.org/packages/df/94/69d8e0bb29c140305e7677bc8c98c765468a55cb10966e77bb8c69bf815d/uv-0.9.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:89697fa0d7384ba047daf75df844ee7800235105e41d08e0c876861a2b4aa90e", size = 22361126, upload-time = "2025-10-30T22:16:51.366Z" }, { url = "https://files.pythonhosted.org/packages/c0/0d/d186456cd0d7972ed026e5977b8a12e1f94c923fc3d6e86c7826c6f0d1fe/uv-0.9.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9810ee8173dce129c49b338d5e97f3d7c7e9435f73e0b9b26c2f37743d3bb9e", size = 21477489, upload-time = "2025-10-30T22:16:53.757Z" }, { url = "https://files.pythonhosted.org/packages/c7/59/61d8e9f1734069049abe9e593961de602397c7194712346906c075fec65f/uv-0.9.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cf6bc2482d1293cc630f66b862b494c09acda9b7faff7307ef52667a2b3ad49", size = 21382006, upload-time = "2025-10-30T22:16:56.117Z" }, { url = "https://files.pythonhosted.org/packages/74/ac/090dbde63abb56001190392d29ca2aa654eebc146a693b5dda68da0df2fb/uv-0.9.7-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:7019f4416925f4091b9d28c1cf3e8444cf910c4ede76bdf1f6b9a56ca5f97985", size = 20255103, upload-time = "2025-10-30T22:16:58.434Z" }, { url = "https://files.pythonhosted.org/packages/56/e7/ca2d99a4ce86366731547a84b5a2c946528b8d6d28c74ac659c925955a0c/uv-0.9.7-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:edd768f6730bba06aa10fdbd80ee064569f7236806f636bf65b68136a430aad0", size = 21311768, upload-time = "2025-10-30T22:17:01.259Z" }, { url = "https://files.pythonhosted.org/packages/d8/1a/c5d9e57f52aa30bfee703e6b9e5b5072102cfc706f3444377bb0de79eac7/uv-0.9.7-py3-none-musllinux_1_1_armv7l.whl", hash = "sha256:d6e5fe28ca05a4b576c0e8da5f69251dc187a67054829cfc4afb2bfa1767114b", size = 20239129, upload-time = "2025-10-30T22:17:03.815Z" }, { url = "https://files.pythonhosted.org/packages/aa/ab/16110ca6b1c4aaad79b4f2c6bc102c416a906e5d29947d0dc774f6ef4365/uv-0.9.7-py3-none-musllinux_1_1_i686.whl", hash = "sha256:34fe0af83fcafb9e2b786f4bd633a06c878d548a7c479594ffb5607db8778471", size = 20647326, upload-time = "2025-10-30T22:17:06.33Z" }, { url = "https://files.pythonhosted.org/packages/89/a9/2a8129c796831279cc0c53ffdd19dd6133d514805e52b1ef8a2aa0ff8912/uv-0.9.7-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:777bb1de174319245a35e4f805d3b4484d006ebedae71d3546f95e7c28a5f436", size = 
21604958, upload-time = "2025-10-30T22:17:09.046Z" }, { url = "https://files.pythonhosted.org/packages/73/97/616650cb4dd5fbaabf8237469e1bc84710ae878095d359999982e1bc8ecf/uv-0.9.7-py3-none-win32.whl", hash = "sha256:bcf878528bd079fe8ae15928b5dfa232fac8b0e1854a2102da6ae1a833c31276", size = 19418913, upload-time = "2025-10-30T22:17:11.384Z" }, { url = "https://files.pythonhosted.org/packages/de/7f/e3cdaffac70852f5ff933b04c7b8a06c0f91f41e563f04b689caa65b71bd/uv-0.9.7-py3-none-win_amd64.whl", hash = "sha256:62b315f62669899076a1953fba6baf50bd2b57f66f656280491331dcedd7e6c6", size = 21443513, upload-time = "2025-10-30T22:17:13.785Z" }, { url = "https://files.pythonhosted.org/packages/89/79/8278452acae2fe96829485d32e1a2363829c9e42674704562ffcfc06b140/uv-0.9.7-py3-none-win_arm64.whl", hash = "sha256:d13da6521d4e841b1e0a9fda82e793dcf8458a323a9e8955f50903479d0bfa97", size = 19946729, upload-time = "2025-10-30T22:17:16.669Z" }, ] [[package]] name = "virtualenv" version = "20.33.1" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version == '3.10.*'", "python_full_version < '3.10'", ] dependencies = [ { name = "distlib", marker = "python_full_version < '3.11'" }, { name = "filelock", version = "3.19.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "filelock", version = "3.20.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, { name = "platformdirs", version = "4.4.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "platformdirs", version = "4.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/8b/60/4f20960df6c7b363a18a55ab034c8f2bcd5d9770d1f94f9370ec104c1855/virtualenv-20.33.1.tar.gz", hash = "sha256:1b44478d9e261b3fb8baa5e74a0ca3bc0e05f21aa36167bf9cbf850e542765b8", size = 6082160, upload-time = "2025-08-05T16:10:55.605Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/ca/ff/ded57ac5ff40a09e6e198550bab075d780941e0b0f83cbeabd087c59383a/virtualenv-20.33.1-py3-none-any.whl", hash = "sha256:07c19bc66c11acab6a5958b815cbcee30891cd1c2ccf53785a28651a0d8d8a67", size = 6060362, upload-time = "2025-08-05T16:10:52.81Z" }, ] [[package]] name = "virtualenv" version = "20.35.4" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.13'", "python_full_version == '3.12.*'", "python_full_version == '3.11.*'", ] dependencies = [ { name = "distlib", marker = "python_full_version >= '3.11'" }, { name = "filelock", version = "3.20.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "platformdirs", version = "4.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/20/28/e6f1a6f655d620846bd9df527390ecc26b3805a0c5989048c210e22c5ca9/virtualenv-20.35.4.tar.gz", hash = "sha256:643d3914d73d3eeb0c552cbb12d7e82adf0e504dbf86a3182f8771a153a1971c", size = 6028799, upload-time = "2025-10-29T06:57:40.511Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/79/0c/c05523fa3181fdf0c9c52a6ba91a23fbf3246cc095f26f6516f9c60e6771/virtualenv-20.35.4-py3-none-any.whl", hash = "sha256:c21c9cede36c9753eeade68ba7d523529f228a403463376cf821eaae2b650f1b", size = 6005095, upload-time = "2025-10-29T06:57:37.598Z" }, ] 
[[package]] name = "zipp" version = "3.21.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/3f/50/bad581df71744867e9468ebd0bcd6505de3b275e06f202c2cb016e3ff56f/zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4", size = 24545, upload-time = "2024-11-10T15:05:20.202Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/b7/1a/7e4798e9339adc931158c9d69ecc34f5e6791489d469f5e50ec15e35f458/zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931", size = 9630, upload-time = "2024-11-10T15:05:19.275Z" }, ]