From 84b2f28b17cfd4531dd86d4039c122a941130e34 Mon Sep 17 00:00:00 2001 From: Woody Date: Thu, 24 Mar 2022 17:46:17 +0800 Subject: [PATCH] add ClsToES demo --- Python3.6-ClsToElasticSearch/config.json | 32 + Python3.6-ClsToElasticSearch/serverless.yml | 30 + .../src/elasticsearch/__init__.py | 62 + .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 1444 bytes .../__pycache__/compat.cpython-37.pyc | Bin 0 -> 654 bytes .../__pycache__/exceptions.cpython-37.pyc | Bin 0 -> 5014 bytes .../__pycache__/transport.cpython-37.pyc | Bin 0 -> 11688 bytes .../src/elasticsearch/client/__init__.py | 2034 +++++++++++++++++ .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 77875 bytes .../src/elasticsearch/client/async_search.py | 187 ++ .../src/elasticsearch/client/autoscaling.py | 14 + .../src/elasticsearch/client/cat.py | 720 ++++++ .../src/elasticsearch/client/ccr.py | 255 +++ .../src/elasticsearch/client/cluster.py | 300 +++ .../src/elasticsearch/client/data_frame.py | 131 ++ .../src/elasticsearch/client/deprecation.py | 17 + .../src/elasticsearch/client/enrich.py | 85 + .../src/elasticsearch/client/eql.py | 25 + .../src/elasticsearch/client/graph.py | 29 + .../src/elasticsearch/client/ilm.py | 158 ++ .../src/elasticsearch/client/indices.py | 1316 +++++++++++ .../src/elasticsearch/client/ingest.py | 95 + .../src/elasticsearch/client/license.py | 94 + .../src/elasticsearch/client/migration.py | 20 + .../src/elasticsearch/client/ml.py | 1478 ++++++++++++ .../src/elasticsearch/client/monitoring.py | 30 + .../src/elasticsearch/client/nodes.py | 156 ++ .../src/elasticsearch/client/remote.py | 12 + .../src/elasticsearch/client/rollup.py | 151 ++ .../src/elasticsearch/client/security.py | 493 ++++ .../src/elasticsearch/client/slm.py | 131 ++ .../src/elasticsearch/client/snapshot.py | 229 ++ .../src/elasticsearch/client/sql.py | 52 + .../src/elasticsearch/client/ssl.py | 14 + .../src/elasticsearch/client/tasks.py | 83 + .../src/elasticsearch/client/transform.py | 204 ++ .../src/elasticsearch/client/utils.py | 125 + .../src/elasticsearch/client/watcher.py | 176 ++ .../src/elasticsearch/client/xpack.py | 32 + .../src/elasticsearch/compat.py | 27 + .../src/elasticsearch/connection/__init__.py | 10 + .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 403 bytes .../__pycache__/base.cpython-37.pyc | Bin 0 -> 7520 bytes .../__pycache__/http_requests.cpython-37.pyc | Bin 0 -> 4880 bytes .../__pycache__/http_urllib3.cpython-37.pyc | Bin 0 -> 6554 bytes .../src/elasticsearch/connection/base.py | 297 +++ .../elasticsearch/connection/http_requests.py | 201 ++ .../elasticsearch/connection/http_urllib3.py | 264 +++ .../src/elasticsearch/connection/pooling.py | 33 + .../src/elasticsearch/connection_pool.py | 282 +++ .../src/elasticsearch/exceptions.py | 152 ++ .../src/elasticsearch/helpers/__init__.py | 17 + .../src/elasticsearch/helpers/actions.py | 543 +++++ .../src/elasticsearch/helpers/errors.py | 14 + .../src/elasticsearch/helpers/test.py | 70 + .../src/elasticsearch/serializer.py | 139 ++ .../src/elasticsearch/transport.py | 403 ++++ Python3.6-ClsToElasticSearch/src/index.py | 54 + 58 files changed, 11476 insertions(+) create mode 100644 Python3.6-ClsToElasticSearch/config.json create mode 100755 Python3.6-ClsToElasticSearch/serverless.yml create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/__init__.py create mode 100644 Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/__init__.cpython-37.pyc create mode 100644 Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/compat.cpython-37.pyc 
create mode 100644 Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/exceptions.cpython-37.pyc create mode 100644 Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/transport.cpython-37.pyc create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/__init__.py create mode 100644 Python3.6-ClsToElasticSearch/src/elasticsearch/client/__pycache__/__init__.cpython-37.pyc create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/async_search.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/autoscaling.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/cat.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/ccr.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/cluster.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/data_frame.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/deprecation.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/enrich.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/eql.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/graph.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/ilm.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/indices.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/ingest.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/license.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/migration.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/ml.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/monitoring.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/nodes.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/remote.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/rollup.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/security.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/slm.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/snapshot.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/sql.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/ssl.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/tasks.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/transform.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/utils.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/watcher.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/client/xpack.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/compat.py create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__init__.py create mode 100644 Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__pycache__/__init__.cpython-37.pyc create mode 100644 Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__pycache__/base.cpython-37.pyc create mode 100644 Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__pycache__/http_requests.cpython-37.pyc create mode 100644 Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__pycache__/http_urllib3.cpython-37.pyc create mode 100755 
Python3.6-ClsToElasticSearch/src/elasticsearch/connection/base.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/connection/http_requests.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/connection/http_urllib3.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/connection/pooling.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/connection_pool.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/exceptions.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/__init__.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/actions.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/errors.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/test.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/serializer.py
 create mode 100755 Python3.6-ClsToElasticSearch/src/elasticsearch/transport.py
 create mode 100644 Python3.6-ClsToElasticSearch/src/index.py

diff --git a/Python3.6-ClsToElasticSearch/config.json b/Python3.6-ClsToElasticSearch/config.json
new file mode 100644
index 000000000..3e77596e4
--- /dev/null
+++ b/Python3.6-ClsToElasticSearch/config.json
@@ -0,0 +1,32 @@
+{
+  "serverless-cloud-function-application": {
+    "Chinese": {
+      "name": "CLS 消息转储至 ES",
+      "description": "使用CLS+云函数+ES+Kibana,提供日志转存能力,替代Logstash,成本更低,支持功能自定义。代码里面可以添加日志清洗、转换等功能",
+      "attention": "在使用本示例代码时需要在函数配置里添加环境变量和VPC配置;环境变量:ES_ADDRESS,ES_USER,ES_PASSWORD,ES_API_KEY,ES_INDEX。本示例代码仅供参考,如需用于生产请根据自身业务进行相应的评估。",
+      "tutorial":"https://cloud.tencent.com/document/product/583/51597",
+      "author": {
+        "name": "腾讯云"
+      }
+    },
+    "English": {
+      "name": "CLSToElasticsearch",
+      "description": "This demo connects to CLS and automatically consumes log messages, shipping them to Elasticsearch.",
+      "attention": "When using this demo, you need to add environment variables and VPC configuration in the function configuration; environment variables: ES_ADDRESS, ES_USER, ES_PASSWORD, ES_API_KEY, ES_INDEX.",
+      "tutorial":" ",
+      "author": {
+        "name": "Tencent Cloud"
+      }
+    },
+    "runtime": "Python3.6",
+    "readme": "https://github.com/tencentyun/serverless-demo/tree/master/Python3.6-ClsToElasticSearch",
+    "version": "1.10.4",
+    "tags": [
+      "Python3.6",
+      "CLS",
+      "ES",
+      "Logstash",
+      "ETL"
+    ]
+  }
+}
diff --git a/Python3.6-ClsToElasticSearch/serverless.yml b/Python3.6-ClsToElasticSearch/serverless.yml
new file mode 100755
index 000000000..05cbb6958
--- /dev/null
+++ b/Python3.6-ClsToElasticSearch/serverless.yml
@@ -0,0 +1,30 @@
+component: scf
+name: ap-guangzhou_default_CLSToElasticsearch
+inputs:
+  name: CLSToElasticsearch
+  src: ./src
+  handler: index.main_handler
+  runtime: Python3.6
+  namespace: default
+  region: ap-guangzhou
+  memorySize: 256
+  environment:
+    - key: ES_ADDRESS
+      value: ES address/ES 地址
+    - key: ES_USER
+      value: ES user name/ES 用户名
+    - key: ES_PASSWORD
+      value: ES password/ES 密码
+    - key: ES_INDEX
+      value: ES index
+    - key: ES_API_KEY
+      value: ES API KEY
+  timeout: 30
+  triggers:
+    - type: cls
+      name:
+      enable: true
+      topic:
+      maxMsgNum: 50
+      offset: latest
+      retry: 10000
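Note: serverless.yml binds the CLS trigger to index.main_handler and feeds it the ES_* environment variables declared above; the real handler lives in src/index.py further down this patch. Purely as an illustrative sketch — the CLS event layout (base64-encoded, gzip-compressed JSON under event["clslogs"]["data"]) and the record field names are assumptions to verify against the CLS trigger docs, not code taken from this patch — a minimal handler would look like:

    # Illustrative sketch only -- not the src/index.py shipped later in
    # this patch. The event shape (base64 + gzip JSON under
    # event["clslogs"]["data"]) is an assumption; check the CLS docs.
    import base64
    import gzip
    import json
    import os

    from elasticsearch import Elasticsearch, helpers

    # Module scope: warm invocations reuse the HTTP connection pool.
    es = Elasticsearch(
        [os.environ["ES_ADDRESS"]],
        http_auth=(os.environ["ES_USER"], os.environ["ES_PASSWORD"]),
    )

    def main_handler(event, context):
        raw = base64.b64decode(event["clslogs"]["data"])
        logs = json.loads(gzip.decompress(raw))
        actions = (
            {"_index": os.environ["ES_INDEX"], "_source": record}
            for record in logs.get("records", [])
        )
        helpers.bulk(es, actions)
        return "ok"

config.json also declares ES_API_KEY; the vendored client accepts api_key=... on the constructor as an alternative to http_auth.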
"7.7.0" + +import logging +import warnings + +logger = logging.getLogger("elasticsearch") +logger.addHandler(logging.NullHandler()) + +from .client import Elasticsearch +from .transport import Transport +from .connection_pool import ConnectionPool, ConnectionSelector, RoundRobinSelector +from .serializer import JSONSerializer +from .connection import Connection, RequestsHttpConnection, Urllib3HttpConnection +from .exceptions import ( + ImproperlyConfigured, + ElasticsearchException, + SerializationError, + TransportError, + NotFoundError, + ConflictError, + RequestError, + ConnectionError, + SSLError, + ConnectionTimeout, + AuthenticationException, + AuthorizationException, + ElasticsearchDeprecationWarning, +) + +# Only raise one warning per deprecation message so as not +# to spam up the user if the same action is done multiple times. +warnings.simplefilter("default", category=ElasticsearchDeprecationWarning, append=True) + +__all__ = [ + "Elasticsearch", + "Transport", + "ConnectionPool", + "ConnectionSelector", + "RoundRobinSelector", + "JSONSerializer", + "Connection", + "RequestsHttpConnection", + "Urllib3HttpConnection", + "ImproperlyConfigured", + "ElasticsearchException", + "SerializationError", + "TransportError", + "NotFoundError", + "ConflictError", + "RequestError", + "ConnectionError", + "SSLError", + "ConnectionTimeout", + "AuthenticationException", + "AuthorizationException", + "ElasticsearchDeprecationWarning", +] diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/__init__.cpython-37.pyc b/Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65c3a1d1b4e5fc7cc61441ccd6bdd32db06e133f GIT binary patch literal 1444 zcmbtT&2HO95GE;0qNsmami+&3x4KvbddMXx+Qtf;B2WU!NrAa6Xw6z-u*+qaWLvw> z(07QBeWktjWt_*>c^lf9U1l4+10CLlF5iUBjH|FMz75-a2X^=_ z?D9R>5>00_>JX=cDUWhQH zlOzz~TOpWgE&VZNIv}KKjYJyaNZbVBoYTwHej2|Sj;RcM7Tiq==|wclz_&V46ybsK*d1SK+QniK*K=OK+C|ofwqAS17_-E*TANMEd$#Ib~N;Q;1P10CVB%1 zk&dRI_t332_YBJopPUZfhnWknLUZGG*p~5Ti=dryN+kqoQhqs P+u8HH)yO&af4%<&brzNb literal 0 HcmV?d00001 diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/compat.cpython-37.pyc b/Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/compat.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81db9fc80d959ae0f36116ed196bce716d8c102d GIT binary patch literal 654 zcmZ{h&yJNa6vkWr!v$s>U1;Lk1Q$bMTpDBax;0})7j_}x(sQqjEoE9TAwH2lQn#+= z6zxXKE9T7em#f2E{crcIDOx@Pd!3@xy9d+pm~Ll`H8{_r-Cp7m~2*upd$dHoKkG z1kEUBW5dawF&Nytu}x@=w&w2WZ3h&6T%a}jH-^^vDjTviXPZx&n_ zBGDiIPfhy?^rVz9+TE7kIdP}p8I!i*VVS_Ne8XI>%`yB3*piK#rWK-Teh_`(vYhN^9i?!MHWl4Cnvj*CrR iNX2IXFDWw36O_a&y?{paADcUwT+plNK+lO1=F6b3@e1gQD1)8@z09ki ztD3HWzRn%cj()xhdWDxjmo!}i{RS_CE^E3DdX?8e*EHP#eS_CQ*EKy4`c1wDdQB`~ zy+u(M4KXjOqQ-Cjr_@>EKiV^zZ)GbV4~8-x3K^bk$5AinkEGz)YCH5)68K7Z(tp(c zlP`uz5J%bao{)hT2B}A6Tgq5w3;WWG)G(GwPS5Yg$#3FO#K$DD4Fi8X+xVUMVkFd< zTEt`|{3)}_-rlGAd)KB@`@ulOqa?erHA)_d2y*4i7h>r_&kk&K(a# z7{ch7+~`=`?AY9bA#6$W=^twhzd~Ooy+8@hj|Pfnq^~ z2UMUEqGJjAF^t7qn(J=U$ttcJc?03PSy+6LtcYS zdieiGo4;2=s?8IaI*$4WUa+|n`=bFCQkxHDd!#t7N*5^sWnAYwf)2y;BZNjg3Rn?w~rpi$E8PGU#MZkHX1AnI$?M}X4h6(M1M z`~pR2?mo@Ek6{+-!l7*U)HpReC0rxx>FYACk^S_w&Vf&jPKDPH7Io=>Mf$B4^y08+ z?=dNok&G1cSQJO^Jotp&{@~Yl@7!s!kM{Q;GL?AANFj8%Xt4)U2UG6zhM5(z=5+g;z?+*b3EBsbXFql&A^wCo7HFZ-oOowV56Mg zJez)7GmpKd=4)&VPLjamSpI=f3gOyf-;>Fjrd-KSZ{T!ZtddswZXuAJuACD{Kk%BO zM1|pYP%+5pIksTl+_XW|i&;;`1J>T#ASg+}yvGPXvgigw)uBo@2`w^ik(Kf+4j;@I 
diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/transport.cpython-37.pyc b/Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/transport.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a2cd569900a7b793e589015a84ae2b2a81b8160
Binary files /dev/null and b/Python3.6-ClsToElasticSearch/src/elasticsearch/__pycache__/transport.cpython-37.pyc differ
diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/__init__.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/__init__.py
new file mode 100755
--- /dev/null
+++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/__init__.py
@@ -0,0 +1,2034 @@
+from __future__ import unicode_literals
+
+import logging
+
+from ..transport import Transport
+from ..exceptions import TransportError
+from ..compat import string_types, urlparse, unquote
+from .async_search import AsyncSearchClient
+from .autoscaling import AutoscalingClient
+from .cat import CatClient
+from .ccr import CcrClient
+from .cluster import ClusterClient
+from .data_frame import Data_FrameClient
+from .deprecation import DeprecationClient
+from .enrich import EnrichClient
+from .eql import EqlClient
+from .graph import GraphClient
+from .ilm import IlmClient
+from .indices import IndicesClient
+from .ingest import IngestClient
+from .license import LicenseClient
+from .migration import MigrationClient
+from .ml import MlClient
+from .monitoring import MonitoringClient
+from .nodes import NodesClient
+from .remote import RemoteClient
+from .rollup import RollupClient
+from .security import SecurityClient
+from .slm import SlmClient
+from .snapshot import SnapshotClient
+from .sql import SqlClient
+from .ssl import SslClient
+from .tasks import TasksClient
+from .transform import TransformClient
+from .utils import query_params, _make_path, SKIP_IN_PATH, _bulk_body
+from .watcher import WatcherClient
+from .xpack import XPackClient
+
+logger = logging.getLogger("elasticsearch")
+
+
+def _normalize_hosts(hosts):
+    """
+    Helper function to transform hosts argument to
+    :class:`~elasticsearch.Elasticsearch` to a list of dicts.
+    """
+    # if hosts are empty, just defer to defaults down the line
+    if hosts is None:
+        return [{}]
+
+    # passed in just one string
+    if isinstance(hosts, string_types):
+        hosts = [hosts]
+
+    out = []
+
+    for host in hosts:
+        # if this is not a dict, try to parse it as a url
+        if isinstance(host, string_types):
+            if "://" not in host:
+                host = "//%s" % host
+
+            parsed_url = urlparse(host)
+            h = {"host": parsed_url.hostname}
+
+            if parsed_url.port:
+                h["port"] = parsed_url.port
+
+            if parsed_url.scheme == "https":
+                h["port"] = parsed_url.port or 443
+                h["use_ssl"] = True
+
+            if parsed_url.username or parsed_url.password:
+                h["http_auth"] = "%s:%s" % (
+                    unquote(parsed_url.username),
+                    unquote(parsed_url.password),
+                )
+
+            if parsed_url.path and parsed_url.path != "/":
+                h["url_prefix"] = parsed_url.path
+
+            out.append(h)
+        else:
+            out.append(host)
+    return out
+
+
+class Elasticsearch(object):
+    """
+    Elasticsearch low-level client. Provides a straightforward mapping from
+    Python to ES REST endpoints.
+
+    The instance has attributes ``cat``, ``cluster``, ``indices``, ``ingest``,
+    ``nodes``, ``snapshot`` and ``tasks`` that provide access to instances of
+    :class:`~elasticsearch.client.CatClient`,
+    :class:`~elasticsearch.client.ClusterClient`,
+    :class:`~elasticsearch.client.IndicesClient`,
+    :class:`~elasticsearch.client.IngestClient`,
+    :class:`~elasticsearch.client.NodesClient`,
+    :class:`~elasticsearch.client.SnapshotClient` and
+    :class:`~elasticsearch.client.TasksClient` respectively. This is the
+    preferred (and only supported) way to get access to those classes and their
+    methods.
+
+    By default :class:`~elasticsearch.JSONSerializer`
+    is used to encode all outgoing requests.
+    However, you can implement your own custom serializer::
+
+        from elasticsearch.serializer import JSONSerializer
+
+        class SetEncoder(JSONSerializer):
+            def default(self, obj):
+                if isinstance(obj, set):
+                    return list(obj)
+                if isinstance(obj, Something):
+                    return 'CustomSomethingRepresentation'
+                return JSONSerializer.default(self, obj)
+
+        es = Elasticsearch(serializer=SetEncoder())
+
+    """
+
+    def __init__(self, hosts=None, transport_class=Transport, **kwargs):
+        """
+        :arg hosts: list of nodes, or a single node, we should connect to.
+            Node should be a dictionary ({"host": "localhost", "port": 9200}),
+            the entire dictionary will be passed to the
+            :class:`~elasticsearch.Connection` class as kwargs, or a string in
+            the format of ``host[:port]`` which will be translated to a
+            dictionary automatically. If no value is given the
+            :class:`~elasticsearch.Connection` class defaults will be used.
+
+        :arg transport_class: :class:`~elasticsearch.Transport` subclass to use.
+
+        :arg kwargs: any additional arguments will be passed on to the
+            :class:`~elasticsearch.Transport` class and, subsequently, to the
+            :class:`~elasticsearch.Connection` instances.
+ """ + self.transport = transport_class(_normalize_hosts(hosts), **kwargs) + + # namespaced clients for compatibility with API names + self.indices = IndicesClient(self) + self.ingest = IngestClient(self) + self.cluster = ClusterClient(self) + self.cat = CatClient(self) + self.nodes = NodesClient(self) + self.remote = RemoteClient(self) + self.snapshot = SnapshotClient(self) + self.tasks = TasksClient(self) + + self.xpack = XPackClient(self) + self.async_search = AsyncSearchClient(self) + self.autoscaling = AutoscalingClient(self) + self.ccr = CcrClient(self) + self.data_frame = Data_FrameClient(self) + self.deprecation = DeprecationClient(self) + self.eql = EqlClient(self) + self.graph = GraphClient(self) + self.ilm = IlmClient(self) + self.indices = IndicesClient(self) + self.license = LicenseClient(self) + self.migration = MigrationClient(self) + self.ml = MlClient(self) + self.monitoring = MonitoringClient(self) + self.rollup = RollupClient(self) + self.security = SecurityClient(self) + self.sql = SqlClient(self) + self.ssl = SslClient(self) + self.watcher = WatcherClient(self) + self.enrich = EnrichClient(self) + self.slm = SlmClient(self) + self.transform = TransformClient(self) + + def __repr__(self): + try: + # get a list of all connections + cons = self.transport.hosts + # truncate to 5 if there are too many + if len(cons) > 5: + cons = cons[:5] + ["..."] + return "<{cls}({cons})>".format(cls=self.__class__.__name__, cons=cons) + except Exception: + # probably operating on custom transport and connection_pool, ignore + return super(Elasticsearch, self).__repr__() + + # AUTO-GENERATED-API-DEFINITIONS # + @query_params() + def ping(self, params=None, headers=None): + """ + Returns whether the cluster is running. + ``_ + """ + try: + return self.transport.perform_request( + "HEAD", "/", params=params, headers=headers + ) + except TransportError: + return False + + @query_params() + def info(self, params=None, headers=None): + """ + Returns basic information about the cluster. + ``_ + """ + return self.transport.perform_request( + "GET", "/", params=params, headers=headers + ) + + @query_params( + "pipeline", + "refresh", + "routing", + "timeout", + "version", + "version_type", + "wait_for_active_shards", + ) + def create(self, index, id, body, doc_type=None, params=None, headers=None): + """ + Creates a new document in the index. Returns a 409 response when a document + with a same ID already exists in the index. + ``_ + + :arg index: The name of the index + :arg id: Document ID + :arg body: The document + :arg doc_type: The type of the document + :arg pipeline: The pipeline id to preprocess incoming documents + with + :arg refresh: If `true` then refresh the affected shards to make + this operation visible to search, if `wait_for` then wait for a refresh + to make this operation visible to search, if `false` (the default) then + do nothing with refreshes. Valid choices: true, false, wait_for + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the index operation. Defaults + to 1, meaning the primary shard only. 
Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1) + """ + for param in (index, id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type in SKIP_IN_PATH: + doc_type = "_doc" + + return self.transport.perform_request( + "PUT", + _make_path(index, doc_type, id, "_create"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "if_primary_term", + "if_seq_no", + "op_type", + "pipeline", + "refresh", + "routing", + "timeout", + "version", + "version_type", + "wait_for_active_shards", + ) + def index(self, index, body, doc_type=None, id=None, params=None, headers=None): + """ + Creates or updates a document in an index. + ``_ + + :arg index: The name of the index + :arg body: The document + :arg doc_type: The type of the document + :arg id: Document ID + :arg if_primary_term: only perform the index operation if the + last operation that has changed the document has the specified primary + term + :arg if_seq_no: only perform the index operation if the last + operation that has changed the document has the specified sequence + number + :arg op_type: Explicit operation type. Defaults to `index` for + requests with an explicit document ID, and to `create`for requests + without an explicit document ID Valid choices: index, create + :arg pipeline: The pipeline id to preprocess incoming documents + with + :arg refresh: If `true` then refresh the affected shards to make + this operation visible to search, if `wait_for` then wait for a refresh + to make this operation visible to search, if `false` (the default) then + do nothing with refreshes. Valid choices: true, false, wait_for + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the index operation. Defaults + to 1, meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1) + """ + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type is None: + doc_type = "_doc" + + return self.transport.perform_request( + "POST" if id in SKIP_IN_PATH else "PUT", + _make_path(index, doc_type, id), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "pipeline", + "refresh", + "routing", + "timeout", + "wait_for_active_shards", + ) + def bulk(self, body, index=None, doc_type=None, params=None, headers=None): + """ + Allows to perform multiple index/update/delete operations in a single request. 
+ ``_ + + :arg body: The operation definition and data (action-data + pairs), separated by newlines + :arg index: Default index for items which don't provide one + :arg doc_type: Default document type for items which don't + provide one + :arg _source: True or false to return the _source field or not, + or default list of fields to return, can be overridden on each sub- + request + :arg _source_excludes: Default list of fields to exclude from + the returned _source field, can be overridden on each sub-request + :arg _source_includes: Default list of fields to extract and + return from the _source field, can be overridden on each sub-request + :arg pipeline: The pipeline id to preprocess incoming documents + with + :arg refresh: If `true` then refresh the affected shards to make + this operation visible to search, if `wait_for` then wait for a refresh + to make this operation visible to search, if `false` (the default) then + do nothing with refreshes. Valid choices: true, false, wait_for + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the bulk operation. Defaults + to 1, meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1) + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_bulk"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def clear_scroll(self, body=None, scroll_id=None, params=None, headers=None): + """ + Explicitly clears the search context for a scroll. + ``_ + + :arg body: A comma-separated list of scroll IDs to clear if none + was specified via the scroll_id parameter + :arg scroll_id: A comma-separated list of scroll IDs to clear + """ + if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH: + raise ValueError("You need to supply scroll_id or body.") + elif scroll_id and not body: + body = {"scroll_id": [scroll_id]} + elif scroll_id: + params["scroll_id"] = scroll_id + + return self.transport.perform_request( + "DELETE", "/_search/scroll", params=params, headers=headers, body=body + ) + + @query_params( + "allow_no_indices", + "analyze_wildcard", + "analyzer", + "default_operator", + "df", + "expand_wildcards", + "ignore_throttled", + "ignore_unavailable", + "lenient", + "min_score", + "preference", + "q", + "routing", + "terminate_after", + ) + def count(self, body=None, index=None, doc_type=None, params=None, headers=None): + """ + Returns number of documents matching a query. + ``_ + + :arg body: A query to restrict the results specified with the + Query DSL (optional) + :arg index: A comma-separated list of indices to restrict the + results + :arg doc_type: A comma-separated list of types to restrict the + results + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_throttled: Whether specified concrete, expanded or + aliased indices should be ignored when throttled + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg min_score: Include only documents with a specific `_score` + value in the result + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg routing: A comma-separated list of specific routing values + :arg terminate_after: The maximum count for each shard, upon + reaching which the query execution will terminate early + """ + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_count"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "if_primary_term", + "if_seq_no", + "refresh", + "routing", + "timeout", + "version", + "version_type", + "wait_for_active_shards", + ) + def delete(self, index, id, doc_type=None, params=None, headers=None): + """ + Removes a document from the index. + ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg doc_type: The type of the document + :arg if_primary_term: only perform the delete operation if the + last operation that has changed the document has the specified primary + term + :arg if_seq_no: only perform the delete operation if the last + operation that has changed the document has the specified sequence + number + :arg refresh: If `true` then refresh the affected shards to make + this operation visible to search, if `wait_for` then wait for a refresh + to make this operation visible to search, if `false` (the default) then + do nothing with refreshes. Valid choices: true, false, wait_for + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte, force + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the delete operation. + Defaults to 1, meaning the primary shard only. 
Set to `all` for all + shard copies, otherwise set to any non-negative value less than or equal + to the total number of copies for the shard (number of replicas + 1) + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type in SKIP_IN_PATH: + doc_type = "_doc" + + return self.transport.perform_request( + "DELETE", _make_path(index, doc_type, id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "allow_no_indices", + "analyze_wildcard", + "analyzer", + "conflicts", + "default_operator", + "df", + "expand_wildcards", + "from_", + "ignore_unavailable", + "lenient", + "max_docs", + "preference", + "q", + "refresh", + "request_cache", + "requests_per_second", + "routing", + "scroll", + "scroll_size", + "search_timeout", + "search_type", + "size", + "slices", + "sort", + "stats", + "terminate_after", + "timeout", + "version", + "wait_for_active_shards", + "wait_for_completion", + ) + def delete_by_query(self, index, body, doc_type=None, params=None, headers=None): + """ + Deletes documents matching the provided query. + ``_ + + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg body: The search definition using the Query DSL + :arg doc_type: A comma-separated list of document types to + search; leave empty to perform the operation on all types + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg conflicts: What to do when the delete by query hits version + conflicts? Valid choices: abort, proceed Default: abort + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg from_: Starting offset (default: 0) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg max_docs: Maximum number of documents to process (default: + all documents) + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg refresh: Should the effected indexes be refreshed? + :arg request_cache: Specify if request cache should be used for + this request or not, defaults to index level setting + :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. 
+ :arg routing: A comma-separated list of specific routing values + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search + :arg scroll_size: Size on the scroll request powering the delete + by query Default: 100 + :arg search_timeout: Explicit timeout for each search request. + Defaults to no timeout. + :arg search_type: Search operation type Valid choices: + query_then_fetch, dfs_query_then_fetch + :arg size: Deprecated, please use `max_docs` instead + :arg slices: The number of slices this task should be divided + into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be + set to `auto`. Default: 1 + :arg sort: A comma-separated list of : pairs + :arg stats: Specific 'tag' of the request for logging and + statistical purposes + :arg terminate_after: The maximum number of documents to collect + for each shard, upon reaching which the query execution will terminate + early. + :arg timeout: Time each individual bulk request should wait for + shards that are unavailable. Default: 1m + :arg version: Specify whether to return document version as part + of a hit + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the delete by query + operation. Defaults to 1, meaning the primary shard only. Set to `all` + for all shard copies, otherwise set to any non-negative value less than + or equal to the total number of copies for the shard (number of replicas + + 1) + :arg wait_for_completion: Should the request should block until + the delete by query is complete. Default: True + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_delete_by_query"), + params=params, + headers=headers, + body=body, + ) + + @query_params("requests_per_second") + def delete_by_query_rethrottle(self, task_id, params=None, headers=None): + """ + Changes the number of requests per second for a particular Delete By Query + operation. + ``_ + + :arg task_id: The task id to rethrottle + :arg requests_per_second: The throttle to set on this request in + floating sub-requests per second. -1 means set no throttle. + """ + if task_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'task_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_delete_by_query", task_id, "_rethrottle"), + params=params, + headers=headers, + ) + + @query_params("master_timeout", "timeout") + def delete_script(self, id, params=None, headers=None): + """ + Deletes a script. + ``_ + + :arg id: Script ID + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "DELETE", _make_path("_scripts", id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "preference", + "realtime", + "refresh", + "routing", + "stored_fields", + "version", + "version_type", + ) + def exists(self, index, id, doc_type=None, params=None, headers=None): + """ + Returns information about whether a document exists in an index. 
+ ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg doc_type: The type of the document (use `_all` to fetch the + first document matching the ID across all types) + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg realtime: Specify whether to perform the operation in + realtime or search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + :arg stored_fields: A comma-separated list of stored fields to + return in the response + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte, force + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type in SKIP_IN_PATH: + doc_type = "_doc" + + return self.transport.perform_request( + "HEAD", _make_path(index, doc_type, id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "preference", + "realtime", + "refresh", + "routing", + "version", + "version_type", + ) + def exists_source(self, index, id, doc_type=None, params=None, headers=None): + """ + Returns information about whether a document source exists in an index. + ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg doc_type: The type of the document; deprecated and optional + starting with 7.0 + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg realtime: Specify whether to perform the operation in + realtime or search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte, force + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "HEAD", + _make_path(index, doc_type, id, "_source"), + params=params, + headers=headers, + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "analyze_wildcard", + "analyzer", + "default_operator", + "df", + "lenient", + "preference", + "q", + "routing", + "stored_fields", + ) + def explain(self, index, id, body=None, doc_type=None, params=None, headers=None): + """ + Returns information about why a specific matches (or doesn't match) a query. 
+ ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg body: The query definition using the Query DSL + :arg doc_type: The type of the document + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg analyze_wildcard: Specify whether wildcards and prefix + queries in the query string query should be analyzed (default: false) + :arg analyzer: The analyzer for the query string query + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The default field for query string query (default: + _all) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg routing: Specific routing value + :arg stored_fields: A comma-separated list of stored fields to + return in the response + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type in SKIP_IN_PATH: + doc_type = "_doc" + + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, id, "_explain"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "fields", + "ignore_unavailable", + "include_unmapped", + ) + def field_caps(self, index=None, params=None, headers=None): + """ + Returns the information about the capabilities of fields among multiple + indices. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or + empty string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg fields: A comma-separated list of field names + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg include_unmapped: Indicates whether unmapped fields should + be included in the response. + """ + return self.transport.perform_request( + "GET", _make_path(index, "_field_caps"), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "preference", + "realtime", + "refresh", + "routing", + "stored_fields", + "version", + "version_type", + ) + def get(self, index, id, doc_type=None, params=None, headers=None): + """ + Returns a document. 
+ ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg doc_type: The type of the document (use `_all` to fetch the + first document matching the ID across all types) + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg realtime: Specify whether to perform the operation in + realtime or search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + :arg stored_fields: A comma-separated list of stored fields to + return in the response + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte, force + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type in SKIP_IN_PATH: + doc_type = "_doc" + + return self.transport.perform_request( + "GET", _make_path(index, doc_type, id), params=params, headers=headers + ) + + @query_params("master_timeout") + def get_script(self, id, params=None, headers=None): + """ + Returns a script. + ``_ + + :arg id: Script ID + :arg master_timeout: Specify timeout for connection to master + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "GET", _make_path("_scripts", id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "preference", + "realtime", + "refresh", + "routing", + "version", + "version_type", + ) + def get_source(self, index, id, doc_type=None, params=None, headers=None): + """ + Returns the source of a document. 
+ ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg doc_type: The type of the document; deprecated and optional + starting with 7.0 + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg realtime: Specify whether to perform the operation in + realtime or search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte, force + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type in SKIP_IN_PATH: + doc_type = "_doc" + + return self.transport.perform_request( + "GET", + _make_path(index, doc_type, id, "_source"), + params=params, + headers=headers, + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "preference", + "realtime", + "refresh", + "routing", + "stored_fields", + ) + def mget(self, body, index=None, doc_type=None, params=None, headers=None): + """ + Allows to get multiple documents in one request. + ``_ + + :arg body: Document identifiers; can be either `docs` + (containing full document information) or `ids` (when index and type is + provided in the URL. + :arg index: The name of the index + :arg doc_type: The type of the document + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg realtime: Specify whether to perform the operation in + realtime or search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + :arg stored_fields: A comma-separated list of stored fields to + return in the response + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_mget"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "ccs_minimize_roundtrips", + "max_concurrent_searches", + "max_concurrent_shard_requests", + "pre_filter_shard_size", + "rest_total_hits_as_int", + "search_type", + "typed_keys", + ) + def msearch(self, body, index=None, doc_type=None, params=None, headers=None): + """ + Allows to execute several search operations in one request. 
+ ``_ + + :arg body: The request definitions (metadata-search request + definition pairs), separated by newlines + :arg index: A comma-separated list of index names to use as + default + :arg doc_type: A comma-separated list of document types to use + as default + :arg ccs_minimize_roundtrips: Indicates whether network round- + trips should be minimized as part of cross-cluster search requests + execution Default: true + :arg max_concurrent_searches: Controls the maximum number of + concurrent searches the multi search api will execute + :arg max_concurrent_shard_requests: The number of concurrent + shard requests each sub search executes concurrently per node. This + value should be used to limit the impact of the search on the cluster in + order to limit the number of concurrent shard requests Default: 5 + :arg pre_filter_shard_size: A threshold that enforces a pre- + filter roundtrip to prefilter search shards based on query rewriting if + the number of shards the search request expands to exceeds the + threshold. This filter roundtrip can limit the number of shards + significantly if for instance a shard can not match any documents based + on its rewrite method ie. if date filters are mandatory to match but the + shard bounds and the query are disjoint. + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg search_type: Search operation type Valid choices: + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_msearch"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "ccs_minimize_roundtrips", + "max_concurrent_searches", + "rest_total_hits_as_int", + "search_type", + "typed_keys", + ) + def msearch_template( + self, body, index=None, doc_type=None, params=None, headers=None + ): + """ + Allows to execute several search template operations in one request. 
+ ``_ + + :arg body: The request definitions (metadata-search request + definition pairs), separated by newlines + :arg index: A comma-separated list of index names to use as + default + :arg doc_type: A comma-separated list of document types to use + as default + :arg ccs_minimize_roundtrips: Indicates whether network round- + trips should be minimized as part of cross-cluster search requests + execution Default: true + :arg max_concurrent_searches: Controls the maximum number of + concurrent searches the multi search api will execute + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg search_type: Search operation type Valid choices: + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_msearch", "template"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "field_statistics", + "fields", + "ids", + "offsets", + "payloads", + "positions", + "preference", + "realtime", + "routing", + "term_statistics", + "version", + "version_type", + ) + def mtermvectors( + self, body=None, index=None, doc_type=None, params=None, headers=None + ): + """ + Returns multiple termvectors in one request. + ``_ + + :arg body: Define ids, documents, parameters or a list of + parameters per document here. You must at least provide a list of + document ids. See documentation. + :arg index: The index in which the document resides. + :arg doc_type: The type of the document. + :arg field_statistics: Specifies if document count, sum of + document frequencies and sum of total term frequencies should be + returned. Applies to all returned documents unless otherwise specified + in body "params" or "docs". Default: True + :arg fields: A comma-separated list of fields to return. Applies + to all returned documents unless otherwise specified in body "params" or + "docs". + :arg ids: A comma-separated list of documents ids. You must + define ids as parameter or set "ids" or "docs" in the request body + :arg offsets: Specifies if term offsets should be returned. + Applies to all returned documents unless otherwise specified in body + "params" or "docs". Default: True + :arg payloads: Specifies if term payloads should be returned. + Applies to all returned documents unless otherwise specified in body + "params" or "docs". Default: True + :arg positions: Specifies if term positions should be returned. + Applies to all returned documents unless otherwise specified in body + "params" or "docs". Default: True + :arg preference: Specify the node or shard the operation should + be performed on (default: random) .Applies to all returned documents + unless otherwise specified in body "params" or "docs". + :arg realtime: Specifies if requests are real-time as opposed to + near-real-time (default: true). + :arg routing: Specific routing value. Applies to all returned + documents unless otherwise specified in body "params" or "docs". + :arg term_statistics: Specifies if total term frequency and + document frequency should be returned. Applies to all returned documents + unless otherwise specified in body "params" or "docs". 
+ :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte, force + """ + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_mtermvectors"), + params=params, + headers=headers, + body=body, + ) + + @query_params("master_timeout", "timeout") + def put_script(self, id, body, context=None, params=None, headers=None): + """ + Creates or updates a script. + ``_ + + :arg id: Script ID + :arg body: The document + :arg context: Context name to compile script against + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + for param in (id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_scripts", id, context), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "allow_no_indices", "expand_wildcards", "ignore_unavailable", "search_type" + ) + def rank_eval(self, body, index=None, params=None, headers=None): + """ + Allows evaluating the quality of ranked search results over a set of typical + search queries + ``_ + + :arg body: The ranking evaluation search definition, including + search requests, document ratings and ranking metric definition. + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg search_type: Search operation type Valid choices: + query_then_fetch, dfs_query_then_fetch + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_rank_eval"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "max_docs", + "refresh", + "requests_per_second", + "scroll", + "slices", + "timeout", + "wait_for_active_shards", + "wait_for_completion", + ) + def reindex(self, body, params=None, headers=None): + """ + Allows copying documents from one index to another, optionally filtering the + source documents by a query, changing the destination index settings, or + fetching the documents from a remote cluster. + ``_ + + :arg body: The search definition using the Query DSL and the + prototype for the index request. + :arg max_docs: Maximum number of documents to process (default: + all documents) + :arg refresh: Should the affected indexes be refreshed? + :arg requests_per_second: The throttle to set on this request in + sub-requests per second. -1 means no throttle. + :arg scroll: Control how long to keep the search context alive + Default: 5m + :arg slices: The number of slices this task should be divided + into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be + set to `auto`. Default: 1 + :arg timeout: Time each individual bulk request should wait for + shards that are unavailable.
Default: 1m + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the reindex operation. + Defaults to 1, meaning the primary shard only. Set to `all` for all + shard copies, otherwise set to any non-negative value less than or equal + to the total number of copies for the shard (number of replicas + 1) + :arg wait_for_completion: Should the request block until + the reindex is complete. Default: True + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_reindex", params=params, headers=headers, body=body + ) + + @query_params("requests_per_second") + def reindex_rethrottle(self, task_id, params=None, headers=None): + """ + Changes the number of requests per second for a particular Reindex operation. + ``_ + + :arg task_id: The task id to rethrottle + :arg requests_per_second: The throttle to set on this request in + floating sub-requests per second. -1 means set no throttle. + """ + if task_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'task_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_reindex", task_id, "_rethrottle"), + params=params, + headers=headers, + ) + + @query_params() + def render_search_template(self, body=None, id=None, params=None, headers=None): + """ + Allows using the Mustache language to pre-render a search definition. + ``_ + + :arg body: The search definition template and its params + :arg id: The id of the stored search template + """ + return self.transport.perform_request( + "POST", + _make_path("_render", "template", id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def scripts_painless_execute(self, body=None, params=None, headers=None): + """ + Allows an arbitrary script to be executed and a result to be returned. + ``_ + + :arg body: The script to execute + """ + return self.transport.perform_request( + "POST", + "/_scripts/painless/_execute", + params=params, + headers=headers, + body=body, + ) + + @query_params("rest_total_hits_as_int", "scroll") + def scroll(self, body=None, scroll_id=None, params=None, headers=None): + """ + Allows retrieving a large number of results from a single search request. + ``_ + + :arg body: The scroll ID if not passed by URL or query + parameter.
+ :arg scroll_id: The scroll ID for scrolled search + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search + """ + if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH: + raise ValueError("You need to supply scroll_id or body.") + elif scroll_id and not body: + body = {"scroll_id": scroll_id} + elif scroll_id: + params["scroll_id"] = scroll_id + + return self.transport.perform_request( + "POST", "/_search/scroll", params=params, headers=headers, body=body + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "allow_no_indices", + "allow_partial_search_results", + "analyze_wildcard", + "analyzer", + "batched_reduce_size", + "ccs_minimize_roundtrips", + "default_operator", + "df", + "docvalue_fields", + "expand_wildcards", + "explain", + "from_", + "ignore_throttled", + "ignore_unavailable", + "lenient", + "max_concurrent_shard_requests", + "pre_filter_shard_size", + "preference", + "q", + "request_cache", + "rest_total_hits_as_int", + "routing", + "scroll", + "search_type", + "seq_no_primary_term", + "size", + "sort", + "stats", + "stored_fields", + "suggest_field", + "suggest_mode", + "suggest_size", + "suggest_text", + "terminate_after", + "timeout", + "track_scores", + "track_total_hits", + "typed_keys", + "version", + ) + def search(self, body=None, index=None, doc_type=None, params=None, headers=None): + """ + Returns results matching a query. + ``_ + + :arg body: The search definition using the Query DSL + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg doc_type: A comma-separated list of document types to + search; leave empty to perform the operation on all types + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg allow_partial_search_results: Indicate if an error should + be returned if there is a partial search failure or timeout Default: + True + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg batched_reduce_size: The number of shard results that + should be reduced at once on the coordinating node. This value should be + used as a protection mechanism to reduce the memory overhead per search + request if the potential number of shards in the request can be large. 
+ Default: 512 + :arg ccs_minimize_roundtrips: Indicates whether network round- + trips should be minimized as part of cross-cluster search requests + execution Default: true + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg docvalue_fields: A comma-separated list of fields to return + as the docvalue representation of a field for each hit + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg explain: Specify whether to return detailed information + about score computation as part of a hit + :arg from_: Starting offset (default: 0) + :arg ignore_throttled: Whether specified concrete, expanded or + aliased indices should be ignored when throttled + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg max_concurrent_shard_requests: The number of concurrent + shard requests per node this search executes concurrently. This value + should be used to limit the impact of the search on the cluster in order + to limit the number of concurrent shard requests Default: 5 + :arg pre_filter_shard_size: A threshold that enforces a pre- + filter roundtrip to prefilter search shards based on query rewriting if + the number of shards the search request expands to exceeds the + threshold. This filter roundtrip can limit the number of shards + significantly if, for instance, a shard cannot match any documents based + on its rewrite method, i.e. if date filters are mandatory to match but the + shard bounds and the query are disjoint. + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg request_cache: Specify if request cache should be used for + this request or not, defaults to index level setting + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg routing: A comma-separated list of specific routing values + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search + :arg search_type: Search operation type Valid choices: + query_then_fetch, dfs_query_then_fetch + :arg seq_no_primary_term: Specify whether to return sequence + number and primary term of the last modification of each hit + :arg size: Number of hits to return (default: 10) + :arg sort: A comma-separated list of <field>:<direction> pairs + :arg stats: Specific 'tag' of the request for logging and + statistical purposes + :arg stored_fields: A comma-separated list of stored fields to + return as part of a hit + :arg suggest_field: Specify which field to use for suggestions + :arg suggest_mode: Specify suggest mode Valid choices: missing, + popular, always Default: missing + :arg suggest_size: How many suggestions to return in response + :arg suggest_text: The source text for which the suggestions + should be returned + :arg terminate_after: The maximum number of documents to collect + for each shard, upon reaching which the query execution will terminate + early.
+ :arg timeout: Explicit operation timeout + :arg track_scores: Whether to calculate and return scores even + if they are not used for sorting + :arg track_total_hits: Indicate if the number of documents that + match the query should be tracked + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + :arg version: Specify whether to return document version as part + of a hit + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_search"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "local", + "preference", + "routing", + ) + def search_shards(self, index=None, params=None, headers=None): + """ + Returns information about the indices and shards that a search request would be + executed against. + ``_ + + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg routing: Specific routing value + """ + return self.transport.perform_request( + "GET", _make_path(index, "_search_shards"), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "ccs_minimize_roundtrips", + "expand_wildcards", + "explain", + "ignore_throttled", + "ignore_unavailable", + "preference", + "profile", + "rest_total_hits_as_int", + "routing", + "scroll", + "search_type", + "typed_keys", + ) + def search_template( + self, body, index=None, doc_type=None, params=None, headers=None + ): + """ + Allows using the Mustache language to pre-render a search definition. + ``_ + + :arg body: The search definition template and its params + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg doc_type: A comma-separated list of document types to + search; leave empty to perform the operation on all types + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg ccs_minimize_roundtrips: Indicates whether network round- + trips should be minimized as part of cross-cluster search requests + execution Default: true + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both.
Valid choices: open, + closed, hidden, none, all Default: open + :arg explain: Specify whether to return detailed information + about score computation as part of a hit + :arg ignore_throttled: Whether specified concrete, expanded or + aliased indices should be ignored when throttled + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg profile: Specify whether to profile the query execution + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg routing: A comma-separated list of specific routing values + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search + :arg search_type: Search operation type Valid choices: + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_search", "template"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "field_statistics", + "fields", + "offsets", + "payloads", + "positions", + "preference", + "realtime", + "routing", + "term_statistics", + "version", + "version_type", + ) + def termvectors( + self, index, body=None, doc_type=None, id=None, params=None, headers=None + ): + """ + Returns information and statistics about terms in the fields of a particular + document. + ``_ + + :arg index: The index in which the document resides. + :arg body: Define parameters and/or supply a document to get + termvectors for. See documentation. + :arg doc_type: The type of the document. + :arg id: The id of the document; when not specified, a doc param + should be supplied. + :arg field_statistics: Specifies if document count, sum of + document frequencies and sum of total term frequencies should be + returned. Default: True + :arg fields: A comma-separated list of fields to return. + :arg offsets: Specifies if term offsets should be returned. + Default: True + :arg payloads: Specifies if term payloads should be returned. + Default: True + :arg positions: Specifies if term positions should be returned. + Default: True + :arg preference: Specify the node or shard the operation should + be performed on (default: random). + :arg realtime: Specifies if request is real-time as opposed to + near-real-time (default: true). + :arg routing: Specific routing value. + :arg term_statistics: Specifies if total term frequency and + document frequency should be returned.
+ :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte, force + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + if doc_type in SKIP_IN_PATH: + doc_type = "_doc" + + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, id, "_termvectors"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "if_primary_term", + "if_seq_no", + "lang", + "refresh", + "retry_on_conflict", + "routing", + "timeout", + "wait_for_active_shards", + ) + def update(self, index, id, body, doc_type=None, params=None, headers=None): + """ + Updates a document with a script or partial document. + ``_ + + :arg index: The name of the index + :arg id: Document ID + :arg body: The request definition requires either `script` or + partial `doc` + :arg doc_type: The type of the document + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg if_primary_term: Only perform the update operation if the + last operation that has changed the document has the specified primary + term + :arg if_seq_no: Only perform the update operation if the last + operation that has changed the document has the specified sequence + number + :arg lang: The script language (default: painless) + :arg refresh: If `true` then refresh the affected shards to make + this operation visible to search, if `wait_for` then wait for a refresh + to make this operation visible to search, if `false` (the default) then + do nothing with refreshes. Valid choices: true, false, wait_for + :arg retry_on_conflict: Specify how many times the + operation should be retried when a conflict occurs (default: 0) + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the update operation. + Defaults to 1, meaning the primary shard only.
Set to `all` for all + shard copies, otherwise set to any non-negative value less than or equal + to the total number of copies for the shard (number of replicas + 1) + """ + for param in (index, id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type in SKIP_IN_PATH: + doc_type = "_doc" + + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, id, "_update"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "allow_no_indices", + "analyze_wildcard", + "analyzer", + "conflicts", + "default_operator", + "df", + "expand_wildcards", + "from_", + "ignore_unavailable", + "lenient", + "max_docs", + "pipeline", + "preference", + "q", + "refresh", + "request_cache", + "requests_per_second", + "routing", + "scroll", + "scroll_size", + "search_timeout", + "search_type", + "size", + "slices", + "sort", + "stats", + "terminate_after", + "timeout", + "version", + "version_type", + "wait_for_active_shards", + "wait_for_completion", + ) + def update_by_query( + self, index, body=None, doc_type=None, params=None, headers=None + ): + """ + Performs an update on every document in the index without changing the source, + for example to pick up a mapping change. + ``_ + + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg body: The search definition using the Query DSL + :arg doc_type: A comma-separated list of document types to + search; leave empty to perform the operation on all types + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg conflicts: What to do when the update by query hits version + conflicts? Valid choices: abort, proceed Default: abort + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg from_: Starting offset (default: 0) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg max_docs: Maximum number of documents to process (default: + all documents) + :arg pipeline: Ingest pipeline to set on index requests made by + this action. (default: none) + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg refresh: Should the affected indexes be refreshed? 
+ :arg request_cache: Specify if request cache should be used for + this request or not, defaults to index level setting + :arg requests_per_second: The throttle to set on this request in + sub-requests per second. -1 means no throttle. + :arg routing: A comma-separated list of specific routing values + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search + :arg scroll_size: Size on the scroll request powering the update + by query Default: 100 + :arg search_timeout: Explicit timeout for each search request. + Defaults to no timeout. + :arg search_type: Search operation type Valid choices: + query_then_fetch, dfs_query_then_fetch + :arg size: Deprecated, please use `max_docs` instead + :arg slices: The number of slices this task should be divided + into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be + set to `auto`. Default: 1 + :arg sort: A comma-separated list of <field>:<direction> pairs + :arg stats: Specific 'tag' of the request for logging and + statistical purposes + :arg terminate_after: The maximum number of documents to collect + for each shard, upon reaching which the query execution will terminate + early. + :arg timeout: Time each individual bulk request should wait for + shards that are unavailable. Default: 1m + :arg version: Specify whether to return document version as part + of a hit + :arg version_type: Should the document increment the version + number (internal) on hit or not (reindex) + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the update by query + operation. Defaults to 1, meaning the primary shard only. Set to `all` + for all shard copies, otherwise set to any non-negative value less than + or equal to the total number of copies for the shard (number of replicas + + 1) + :arg wait_for_completion: Should the request block until + the update by query operation is complete. Default: True + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_update_by_query"), + params=params, + headers=headers, + body=body, + ) + + @query_params("requests_per_second") + def update_by_query_rethrottle(self, task_id, params=None, headers=None): + """ + Changes the number of requests per second for a particular Update By Query + operation. + ``_ + + :arg task_id: The task id to rethrottle + :arg requests_per_second: The throttle to set on this request in + floating sub-requests per second. -1 means set no throttle. + """ + if task_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'task_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_update_by_query", task_id, "_rethrottle"), + params=params, + headers=headers, + ) + + @query_params() + def get_script_context(self, params=None, headers=None): + """ + Returns all script contexts.
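+ + Example (an illustrative sketch; `es` is an assumed `Elasticsearch` + client instance):: + + contexts = es.get_script_context() +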
+ ``_ + """ + return self.transport.perform_request( + "GET", "/_script_context", params=params, headers=headers + ) + + @query_params() + def get_script_languages(self, params=None, headers=None): + """ + Returns available script types, languages and contexts + ``_ + """ + return self.transport.perform_request( + "GET", "/_script_language", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/__pycache__/__init__.cpython-37.pyc b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3371df774cf3c96edfe37d2a19c70e771712f6d1 GIT binary patch literal 77875 zcmeHwX>c6pncfU;fZ!pCx~Qu~N&+C^pf1aTPKu-sTcTx=vc2QA2ZQ+sr5-901$ zf!_5}8+#A$eRz-T+IwX0BiSUo$tIiK!>-m8zujJnwgP z_Y4k#q)17Ct!7X6*WdYm@BO~tD|`2j6!5qB{)x&jf3aBjZ~S5Z`f&3y{)^X(g@P)m zV!hBPmWoB*_tbkzJ*rpr)q5L#r9M3CRsHq;#z1L6?g#3FjiJ&|W4JWj7%7e5*`OM# z?`e#dM&*9EzPGWjv`_9w>iZi9N(bbAPyLq0t)*M#ezbmD<6!Ba-0!X5-ngT5hurV0 z-`TjUbeG)kuixD`R5~R02kQ4U?k(Lb_qWvVYusPDzwtonfksdY8V{Bpl;^kB4>yjK zj>!FO^@kb{mmZe;gY`!mW2G^Q zX5{|v`q9SYrN`y|Q2mLMw)Cvr-(R0? zJXd;7?jNW>-*}<)g4_r77aK2?UXuF<>n}Hsm5#~%;rc6$n%qBJf4%WW>5XC`?D@`T)Fa<5mOiV-)c6O5(wnap)P#EUgMxZAeCu*=>6Gd@ zU6^?6dtC8};`)JhvsP`Xuw1VtVN|Kdr@q&t-wdCLD$RJg6(uOP&;2+VMXhL}cYQQY zqFQsYoUAN|@%m6ZsxMcfI9wlWH{WZwlJI)~AU3e~ji#zq!}xf;7B*Sc=o`(&Fiza> zy~peA7;U=;!^bQB?VeNUTRj@ z>gli&RhL}Tw;XFHt+-mLV;0=Q;p0_5kOL5>C*P~P zAA4SlD$Bl^;Wz3H_ipc-n964CtK0upZ86GLHuRRS>%d#BW-V!Ppdx@#*w?rBinv~(wsow4vpjl`|W&rDZ!+Ir7 zYSma~vPuZyU%&b9IR1_Q7MC?3o^wOdzFJ|ic)F0_cT&9E6Wzat-X zQTWPVTY`o0SU`rf$Q_vrgC;tjUpzSZ+10sdYO>&syjEVP?d0!EOu zf`q$|a~CYN;v^0#(PF!S9fWuC$!ryK8PCprEiOySnuOjFZC}a$#Pu#ORUYA(qKCd%W+&^?SD9)MZsadELYme z(*G&Wlt!@U%K+wu+Qo@I>m#+e){K)%vl>PNSi@)#m(mccZejyOQ~Ws)19}=^X$TcZ zd|Vn@#waedA{7mx06M|FI8a$$4x4JCf4yG@zuwzwC($Hp7!fR^%4mDNxb%C4=u>$5 zf8LsT7Zt`cE10uZb8)^>n>o>{%6!E$Z%3`OAx1uPydIxvojg60nX#FwAf}mexrVJ$ zE>AD7tRE;hG4a^ht6^C>z|O}{;8GYEDh~9H6bE|xdPa&P#i8O*&rorsX8`|=6!+oR zXt4)RZ|yyRC!;<4iqWU>Rh6IGzsJl?{J%hL!$PU33ZSdSQm^U>`xb=4eo!p+f1|Ka zQ~=KphDrnB;N^n)71GlWikFL}A^H8QYFLfP@8NJz?NOs@uiB^fs{`tm%Y*9gt6SA= z@^%Do531YM9l0_i>etkr>Mkj>2W9S7htxf;%q^FP)UT_1)qV1QR9gIwx?epYzxSei zpdM6*)e-fOdRRT8Ky658)MMzyQ|cM@Y4xm{RnMvC)eCA;MtqBU@$!S}H`Pn(WvTH1YJB=~QT>)W zre2XJx8TWfweNC|`fYVWos?&{x^^E`ugdJb#@V|(qW*z;UA-Y?ZtEz+62t0u)MwOZ zrNlv3!p_FHnT6LbkE-8QZ>qPX-0dhAV5Ux~G4;92{p$DB+v@Z3<_=dwz8}A@-chHe z%$+E693%Pzbw<4_PwwjYO84v!)fdzkrNrGGC0@gt{*n5U`aXGo2=7bk%j)~pSJYQA zr)5CRAFDZ4k+=7lxB5-L`V%#;s`BPuyipiOs20?sTEb{w#mdxV?nc$0sd(}&dQaX5c+VC6 z$0|~>Jb6$c@}H3odrkd-`a$(|^+N)Am(>TVtoC1iRQ-keVfFW< zjfc^O9^p4o$KJ~W>Yu4^svnVekI1`{w8b6ssQOXYLizGu_0QFhsUMe;V<^cv?wsL0 z>Mzx|)K5sMag@^I;r{+f%->I;znHztd(^*BKdpX7N=>*@-S^iX8SCad3^)ZVazy=0 z^|R{dq}E4KD_7*N)X%G5kSCAH3j8bei|X6*ds2S?wfZIX%kq2b8->0?X&PkjZ`Su_ z=v3wSt%ZWnrOe}?-nuYV56_47KqKkt0J!{o4GbZu8u-sjZE-09jl58aRL}s?0G|^q zM6HI@_4Z1#q-p8N)4@9@PoD|Grdn>*nk0f!;>=PQSfUjyRZwdJrgpxapna?5zDkRu=bC-P~N0%&LAB8^&~QE~qpWYDvhK;#aa%NrGiPj-XNnYaEkI z+kV8L)Kc391m4Vqw^?$zpe6FR$r!d8xxfh2j+q( zjF-U*)BsW|)4`dgS{&g2$xnS4>GCV~r<6)x0bn532s zAQp!JPjE4WN_>XeYLV2}2=P`~VEevSJE&Hg0eeteSP52I?I>tnXa?0*v&l{YR;8Zc z!V*~O08&l6u7de60JDUDR}9bq#-(<&L)dg5m*rysmmj<(f+6ot9Kl4BlNv#n=FnHlpN8M`!ZYKax zVMJ+3*mv1?0$0ktaJwX){aWDrlI(m<-JbTXxUNZqLD$-Y413q-v1y6Tp%T zsM*4{G8O7=cKY<21P5Z4;HjscblpEFjO+*zz#qu zAZc2x@}y&whssmHf#QuoSW$LR|OkHC$EI*U9W{w}zk31sly6|AJfR37DiS0M1 zTKYAKjR+JVmX>~mV}bc1`D z2R03`f6bZ;yl{H~D`plRMaulN%$0gU>UZzWdQ102vqI{+1XOG)lGU_Km3;NdTg97t z=VmFvk>Kr8c+8-m@|)@9urapnR?dYh-D=0< 
diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/async_search.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/async_search.py new file mode 100755 index 000000000..1c9bd472f --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/async_search.py @@ -0,0 +1,187 @@ +from .utils import NamespacedClient, SKIP_IN_PATH, query_params, _make_path + + +class AsyncSearchClient(NamespacedClient): + @query_params() + def delete(self, id, params=None, headers=None): + """ + Deletes an async search by ID. If the search is still running, the search + request will be cancelled. Otherwise, the saved search results are deleted. + ``_ + + :arg id: The async search ID + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "DELETE", _make_path("_async_search", id), params=params, headers=headers + ) + + @query_params("keep_alive", "typed_keys", "wait_for_completion_timeout") + def get(self, id, params=None, headers=None): + """ + Retrieves the results of a previously submitted async search request given its + ID.
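+ + Example (an illustrative sketch, not from the upstream docs; assumes a + client instance `es` and that the earlier `submit` call returned an + `id`, i.e. the search had not finished within its wait timeout):: + + submitted = es.async_search.submit( + index="logs-*", body={"query": {"match_all": {}}} + ) + resp = es.async_search.get( + submitted["id"], wait_for_completion_timeout="2s" + ) +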
+ ``_ + + :arg id: The async search ID + :arg keep_alive: Specify the time interval in which the results + (partial or final) for this search will be available + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + :arg wait_for_completion_timeout: Specify the time that the + request should block waiting for the final response + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "GET", _make_path("_async_search", id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "allow_no_indices", + "allow_partial_search_results", + "analyze_wildcard", + "analyzer", + "batched_reduce_size", + "default_operator", + "df", + "docvalue_fields", + "expand_wildcards", + "explain", + "from_", + "ignore_throttled", + "ignore_unavailable", + "keep_alive", + "keep_on_completion", + "lenient", + "max_concurrent_shard_requests", + "preference", + "q", + "request_cache", + "routing", + "search_type", + "seq_no_primary_term", + "size", + "sort", + "stats", + "stored_fields", + "suggest_field", + "suggest_mode", + "suggest_size", + "suggest_text", + "terminate_after", + "timeout", + "track_scores", + "track_total_hits", + "typed_keys", + "version", + "wait_for_completion_timeout", + ) + def submit(self, body=None, index=None, params=None, headers=None): + """ + Executes a search request asynchronously. + ``_ + + :arg body: The search definition using the Query DSL + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg allow_partial_search_results: Indicate if an error should + be returned if there is a partial search failure or timeout Default: + True + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg batched_reduce_size: The number of shard results that + should be reduced at once on the coordinating node. This value should be + used as the granularity at which progress results will be made + available. Default: 5 + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg docvalue_fields: A comma-separated list of fields to return + as the docvalue representation of a field for each hit + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. 
Valid choices: open, + closed, hidden, none, all Default: open + :arg explain: Specify whether to return detailed information + about score computation as part of a hit + :arg from_: Starting offset (default: 0) + :arg ignore_throttled: Whether specified concrete, expanded or + aliased indices should be ignored when throttled + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg keep_alive: Update the time interval in which the results + (partial or final) for this search will be available Default: 5d + :arg keep_on_completion: Control whether the response should be + stored in the cluster if it completed within the provided + [wait_for_completion] time (default: false) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg max_concurrent_shard_requests: The number of concurrent + shard requests per node this search executes concurrently. This value + should be used to limit the impact of the search on the cluster in order + to limit the number of concurrent shard requests Default: 5 + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg request_cache: Specify if request cache should be used for + this request or not, defaults to true + :arg routing: A comma-separated list of specific routing values + :arg search_type: Search operation type Valid choices: + query_then_fetch, dfs_query_then_fetch + :arg seq_no_primary_term: Specify whether to return sequence + number and primary term of the last modification of each hit + :arg size: Number of hits to return (default: 10) + :arg sort: A comma-separated list of <field>:<direction> pairs + :arg stats: Specific 'tag' of the request for logging and + statistical purposes + :arg stored_fields: A comma-separated list of stored fields to + return as part of a hit + :arg suggest_field: Specify which field to use for suggestions + :arg suggest_mode: Specify suggest mode Valid choices: missing, + popular, always Default: missing + :arg suggest_size: How many suggestions to return in response + :arg suggest_text: The source text for which the suggestions + should be returned + :arg terminate_after: The maximum number of documents to collect + for each shard, upon reaching which the query execution will terminate + early.
+ :arg timeout: Explicit operation timeout + :arg track_scores: Whether to calculate and return scores even + if they are not used for sorting + :arg track_total_hits: Indicate if the number of documents that + match the query should be tracked + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + :arg version: Specify whether to return document version as part + of a hit + :arg wait_for_completion_timeout: Specify the time that the + request should block waiting for the final response Default: 1s + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "POST", + _make_path(index, "_async_search"), + params=params, + headers=headers, + body=body, + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/autoscaling.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/autoscaling.py new file mode 100755 index 000000000..7b2f158c8 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/autoscaling.py @@ -0,0 +1,14 @@ +from .utils import NamespacedClient, query_params + + +class AutoscalingClient(NamespacedClient): + @query_params() + def get_autoscaling_decision(self, params=None, headers=None): + """ + Gets the current autoscaling decision based on the configured autoscaling + policy, indicating whether or not autoscaling is needed. + ``_ + """ + return self.transport.perform_request( + "GET", "/_autoscaling/decision", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/cat.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/cat.py new file mode 100755 index 000000000..5a344766f --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/cat.py @@ -0,0 +1,720 @@ +from .utils import NamespacedClient, query_params, _make_path + + +class CatClient(NamespacedClient): + @query_params("expand_wildcards", "format", "h", "help", "local", "s", "v") + def aliases(self, name=None, params=None, headers=None): + """ + Shows information about currently configured aliases to indices, including + filter and routing information. + ``_ + + :arg name: A comma-separated list of alias names to return + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: all + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "aliases", name), params=params, headers=headers + ) + + @query_params("bytes", "format", "h", "help", "local", "master_timeout", "s", "v") + def allocation(self, node_id=None, params=None, headers=None): + """ + Provides a snapshot of how many shards are allocated to each data node and how + much disk space they are using.
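+ + Example (an illustrative sketch; `es` is an assumed client instance, + and `bytes` and `v` are the query params documented below):: + + print(es.cat.allocation(bytes="mb", v=True)) +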
+ ``_ + + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "allocation", node_id), + params=params, + headers=headers, + ) + + @query_params("format", "h", "help", "s", "v") + def count(self, index=None, params=None, headers=None): + """ + Provides quick access to the document count of the entire cluster, or + individual indices. + ``_ + + :arg index: A comma-separated list of index names to limit the + returned information + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "count", index), params=params, headers=headers + ) + + @query_params("format", "h", "help", "s", "time", "ts", "v") + def health(self, params=None, headers=None): + """ + Returns a concise representation of the cluster health. + ``_ + + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg ts: Set to false to disable timestamping Default: True + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", "/_cat/health", params=params, headers=headers + ) + + @query_params("help", "s") + def help(self, params=None, headers=None): + """ + Returns help for the Cat APIs. + ``_ + + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + """ + return self.transport.perform_request( + "GET", "/_cat", params=params, headers=headers + ) + + @query_params( + "bytes", + "expand_wildcards", + "format", + "h", + "health", + "help", + "include_unloaded_segments", + "local", + "master_timeout", + "pri", + "s", + "time", + "v", + ) + def indices(self, index=None, params=None, headers=None): + """ + Returns information about indices: number of primaries and replicas, document + counts, disk size, ... + ``_ + + :arg index: A comma-separated list of index names to limit the + returned information + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: all + :arg format: a short version of the Accept header, e.g. 
json, + yaml + :arg h: Comma-separated list of column names to display + :arg health: A health status ("green", "yellow", or "red") to + filter only indices matching the specified health status Valid choices: + green, yellow, red + :arg help: Return help information + :arg include_unloaded_segments: If set to true, segment stats + will include stats for segments that are not currently loaded into + memory + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg pri: Set to true to return stats only for primary shards + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "indices", index), params=params, headers=headers + ) + + @query_params("format", "h", "help", "local", "master_timeout", "s", "v") + def master(self, params=None, headers=None): + """ + Returns information about the master node. + ``_ + + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", "/_cat/master", params=params, headers=headers + ) + + @query_params( + "bytes", + "format", + "full_id", + "h", + "help", + "local", + "master_timeout", + "s", + "time", + "v", + ) + def nodes(self, params=None, headers=None): + """ + Returns basic statistics about performance of cluster nodes. + ``_ + + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg full_id: Return the full node ID instead of the shortened + version (default: false) + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Calculate the selected nodes using the local cluster + state rather than the state from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", "/_cat/nodes", params=params, headers=headers + ) + + @query_params( + "active_only", "bytes", "detailed", "format", "h", "help", "s", "time", "v" + ) + def recovery(self, index=None, params=None, headers=None): + """ + Returns information about index shard recoveries, both on-going and completed.
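+ + Example (an illustrative sketch; the client instance `es` and the + index name are assumed for demonstration):: + + print(es.cat.recovery(index="logs-2022.03.24", active_only=True, v=True)) +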
+ ``_ + + :arg index: Comma-separated list or wildcard expression of index + names to limit the returned information + :arg active_only: If `true`, the response only includes ongoing + shard recoveries + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg detailed: If `true`, the response includes detailed + information about shard recoveries + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "recovery", index), params=params, headers=headers + ) + + @query_params( + "bytes", "format", "h", "help", "local", "master_timeout", "s", "time", "v" + ) + def shards(self, index=None, params=None, headers=None): + """ + Provides a detailed view of shard allocation on nodes. + ``_ + + :arg index: A comma-separated list of index names to limit the + returned information + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "shards", index), params=params, headers=headers + ) + + @query_params("bytes", "format", "h", "help", "s", "v") + def segments(self, index=None, params=None, headers=None): + """ + Provides low-level information about the segments in the shards of an index. + ``_ + + :arg index: A comma-separated list of index names to limit the + returned information + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "segments", index), params=params, headers=headers + ) + + @query_params("format", "h", "help", "local", "master_timeout", "s", "time", "v") + def pending_tasks(self, params=None, headers=None): + """ + Returns a concise representation of the cluster pending tasks. + ``_ + + :arg format: a short version of the Accept header, e.g. 
json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg time: The unit in which to display time values Valid
+ choices: d, h, m, s, ms, micros, nanos
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET", "/_cat/pending_tasks", params=params, headers=headers
+ )
+
+ @query_params("format", "h", "help", "local", "master_timeout", "s", "size", "v")
+ def thread_pool(self, thread_pool_patterns=None, params=None, headers=None):
+ """
+ Returns cluster-wide thread pool statistics per node. By default the active,
+ queue and rejected statistics are returned for all thread pools.
+ ``_
+
+ :arg thread_pool_patterns: A comma-separated list of regular-
+ expressions to filter the thread pools in the output
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg size: The multiplier in which to display values Valid
+ choices: , k, m, g, t, p
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_cat", "thread_pool", thread_pool_patterns),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("bytes", "format", "h", "help", "s", "v")
+ def fielddata(self, fields=None, params=None, headers=None):
+ """
+ Shows how much heap memory is currently being used by fielddata on every data
+ node in the cluster.
+ ``_
+
+ :arg fields: A comma-separated list of fields to return in the
+ output
+ :arg bytes: The unit in which to display byte values Valid
+ choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_cat", "fielddata", fields),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+ def plugins(self, params=None, headers=None):
+ """
+ Returns information about installed plugins across nodes.
+ ``_
+
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg v: Verbose mode.
Display column headers + """ + return self.transport.perform_request( + "GET", "/_cat/plugins", params=params, headers=headers + ) + + @query_params("format", "h", "help", "local", "master_timeout", "s", "v") + def nodeattrs(self, params=None, headers=None): + """ + Returns information about custom node attributes. + ``_ + + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", "/_cat/nodeattrs", params=params, headers=headers + ) + + @query_params("format", "h", "help", "local", "master_timeout", "s", "v") + def repositories(self, params=None, headers=None): + """ + Returns information about snapshot repositories registered in the cluster. + ``_ + + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", "/_cat/repositories", params=params, headers=headers + ) + + @query_params( + "format", "h", "help", "ignore_unavailable", "master_timeout", "s", "time", "v" + ) + def snapshots(self, repository=None, params=None, headers=None): + """ + Returns all snapshots in a specific repository. + ``_ + + :arg repository: Name of repository from which to fetch the + snapshot information + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg ignore_unavailable: Set to true to ignore unavailable + snapshots + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "snapshots", repository), + params=params, + headers=headers, + ) + + @query_params( + "actions", + "detailed", + "format", + "h", + "help", + "node_id", + "parent_task", + "s", + "time", + "v", + ) + def tasks(self, params=None, headers=None): + """ + Returns information about the tasks currently executing on one or more nodes in + the cluster. + ``_ + + :arg actions: A comma-separated list of actions that should be + returned. Leave empty to return all. + :arg detailed: Return detailed task information (default: false) + :arg format: a short version of the Accept header, e.g. 
json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information; use `_local` to return information from + the node you're connecting to, leave empty to get information from all + nodes + :arg parent_task: Return tasks with specified parent task id. + Set to -1 to return all. + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", "/_cat/tasks", params=params, headers=headers + ) + + @query_params("format", "h", "help", "local", "master_timeout", "s", "v") + def templates(self, name=None, params=None, headers=None): + """ + Returns information about existing templates. + ``_ + + :arg name: A pattern that returned template names must match + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "templates", name), params=params, headers=headers + ) + + @query_params("allow_no_match", "bytes", "format", "h", "help", "s", "time", "v") + def ml_data_frame_analytics(self, id=None, params=None, headers=None): + """ + Gets configuration and usage information about data frame analytics jobs. + ``_ + + :arg id: The ID of the data frame analytics to fetch + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no configs. (This includes `_all` string or when no configs have + been specified) + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "ml", "data_frame", "analytics", id), + params=params, + headers=headers, + ) + + @query_params("allow_no_datafeeds", "format", "h", "help", "s", "time", "v") + def ml_datafeeds(self, datafeed_id=None, params=None, headers=None): + """ + Gets configuration and usage information about datafeeds. + ``_ + + :arg datafeed_id: The ID of the datafeeds stats to fetch + :arg allow_no_datafeeds: Whether to ignore if a wildcard + expression matches no datafeeds. (This includes `_all` string or when no + datafeeds have been specified) + :arg format: a short version of the Accept header, e.g. 
json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "ml", "datafeeds", datafeed_id), + params=params, + headers=headers, + ) + + @query_params("allow_no_jobs", "bytes", "format", "h", "help", "s", "time", "v") + def ml_jobs(self, job_id=None, params=None, headers=None): + """ + Gets configuration and usage information about anomaly detection jobs. + ``_ + + :arg job_id: The ID of the jobs stats to fetch + :arg allow_no_jobs: Whether to ignore if a wildcard expression + matches no jobs. (This includes `_all` string or when no jobs have been + specified) + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "ml", "anomaly_detectors", job_id), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_match", + "bytes", + "format", + "from_", + "h", + "help", + "s", + "size", + "time", + "v", + ) + def ml_trained_models(self, model_id=None, params=None, headers=None): + """ + Gets configuration and usage information about inference trained models. + ``_ + + :arg model_id: The ID of the trained models stats to fetch + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no trained models. (This includes `_all` string or when no + trained models have been specified) Default: True + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg from_: skips a number of trained models + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg size: specifies a max number of trained models to get + Default: 100 + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_cat", "ml", "trained_models", model_id), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_match", "format", "from_", "h", "help", "s", "size", "time", "v" + ) + def transforms(self, transform_id=None, params=None, headers=None): + """ + Gets configuration and usage information about transforms. + ``_ + + :arg transform_id: The id of the transform for which to get + stats. '_all' or '*' implies all transforms + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no transforms. 
(This includes `_all` string or when no
+ transforms have been specified)
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg from_: skips a number of transform configs, defaults to 0
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg size: specifies a max number of transforms to get, defaults
+ to 100
+ :arg time: The unit in which to display time values Valid
+ choices: d, h, m, s, ms, micros, nanos
+ :arg v: Verbose mode. Display column headers
+ """
+ # from is a reserved word so it cannot be used, use from_ instead
+ if "from_" in params:
+ params["from"] = params.pop("from_")
+
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_cat", "transforms", transform_id),
+ params=params,
+ headers=headers,
+ )
diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ccr.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ccr.py
new file mode 100755
index 000000000..5ef08c47c
--- /dev/null
+++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ccr.py
@@ -0,0 +1,255 @@
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class CcrClient(NamespacedClient):
+ @query_params()
+ def delete_auto_follow_pattern(self, name, params=None, headers=None):
+ """
+ Deletes auto-follow patterns.
+ ``_
+
+ :arg name: The name of the auto follow pattern.
+ """
+ if name in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'name'.")
+
+ return self.transport.perform_request(
+ "DELETE",
+ _make_path("_ccr", "auto_follow", name),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("wait_for_active_shards")
+ def follow(self, index, body, params=None, headers=None):
+ """
+ Creates a new follower index configured to follow the referenced leader index.
+ ``_
+
+ :arg index: The name of the follower index
+ :arg body: The name of the leader index and other optional ccr-
+ related parameters
+ :arg wait_for_active_shards: Sets the number of shard copies
+ that must be active before returning. Defaults to 0. Set to `all` for
+ all shard copies, otherwise set to any non-negative value less than or
+ equal to the total number of copies for the shard (number of replicas +
+ 1) Default: 0
+ """
+ for param in (index, body):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "PUT",
+ _make_path(index, "_ccr", "follow"),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def follow_info(self, index, params=None, headers=None):
+ """
+ Retrieves information about all follower indices, including parameters and
+ status for each follower index.
+ ``_
+
+ :arg index: A comma-separated list of index patterns; use `_all`
+ to perform the operation on all indices
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'index'.")
+
+ return self.transport.perform_request(
+ "GET", _make_path(index, "_ccr", "info"), params=params, headers=headers
+ )
+
+ @query_params()
+ def follow_stats(self, index, params=None, headers=None):
+ """
+ Retrieves follower stats. Returns shard-level stats about the following tasks
+ associated with each shard for the specified indices.
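+
+ Usage sketch (hypothetical; assumes a configured ``Elasticsearch``
+ client ``es`` and an existing follower index)::
+
+     es.ccr.follow_stats(index="follower-index")  # placeholder index name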
+ ``_ + + :arg index: A comma-separated list of index patterns; use `_all` + to perform the operation on all indices + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "GET", _make_path(index, "_ccr", "stats"), params=params, headers=headers + ) + + @query_params() + def forget_follower(self, index, body, params=None, headers=None): + """ + Removes the follower retention leases from the leader. + ``_ + + :arg index: the name of the leader index for which specified + follower retention leases should be removed + :arg body: the name and UUID of the follower index, the name of + the cluster containing the follower index, and the alias from the + perspective of that cluster for the remote cluster containing the leader + index + """ + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_ccr", "forget_follower"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def get_auto_follow_pattern(self, name=None, params=None, headers=None): + """ + Gets configured auto-follow patterns. Returns the specified auto-follow pattern + collection. + ``_ + + :arg name: The name of the auto follow pattern. + """ + return self.transport.perform_request( + "GET", + _make_path("_ccr", "auto_follow", name), + params=params, + headers=headers, + ) + + @query_params() + def pause_follow(self, index, params=None, headers=None): + """ + Pauses a follower index. The follower index will not fetch any additional + operations from the leader index. + ``_ + + :arg index: The name of the follower index that should pause + following its leader index. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_ccr", "pause_follow"), + params=params, + headers=headers, + ) + + @query_params() + def put_auto_follow_pattern(self, name, body, params=None, headers=None): + """ + Creates a new named collection of auto-follow patterns against a specified + remote cluster. Newly created indices on the remote cluster matching any of the + specified patterns will be automatically configured as follower indices. + ``_ + + :arg name: The name of the auto follow pattern. + :arg body: The specification of the auto follow pattern + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ccr", "auto_follow", name), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def resume_follow(self, index, body=None, params=None, headers=None): + """ + Resumes a follower index that has been paused + ``_ + + :arg index: The name of the follow index to resume following. + :arg body: The name of the leader index and other optional ccr + related parameters + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_ccr", "resume_follow"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def stats(self, params=None, headers=None): + """ + Gets all stats related to cross-cluster replication. 
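+
+ Usage sketch (hypothetical; assumes a configured ``Elasticsearch``
+ client ``es``)::
+
+     resp = es.ccr.stats()  # all cross-cluster replication stats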
+ ``_ + """ + return self.transport.perform_request( + "GET", "/_ccr/stats", params=params, headers=headers + ) + + @query_params() + def unfollow(self, index, params=None, headers=None): + """ + Stops the following task associated with a follower index and removes index + metadata and settings associated with cross-cluster replication. + ``_ + + :arg index: The name of the follower index that should be turned + into a regular index. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_ccr", "unfollow"), + params=params, + headers=headers, + ) + + @query_params() + def pause_auto_follow_pattern(self, name, params=None, headers=None): + """ + Pauses an auto-follow pattern + ``_ + + :arg name: The name of the auto follow pattern that should pause + discovering new indices to follow. + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "POST", + _make_path("_ccr", "auto_follow", name, "pause"), + params=params, + headers=headers, + ) + + @query_params() + def resume_auto_follow_pattern(self, name, params=None, headers=None): + """ + Resumes an auto-follow pattern that has been paused + ``_ + + :arg name: The name of the auto follow pattern to resume + discovering new indices to follow. + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "POST", + _make_path("_ccr", "auto_follow", name, "resume"), + params=params, + headers=headers, + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/cluster.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/cluster.py new file mode 100755 index 000000000..ea43f262b --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/cluster.py @@ -0,0 +1,300 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class ClusterClient(NamespacedClient): + @query_params( + "expand_wildcards", + "level", + "local", + "master_timeout", + "timeout", + "wait_for_active_shards", + "wait_for_events", + "wait_for_no_initializing_shards", + "wait_for_no_relocating_shards", + "wait_for_nodes", + "wait_for_status", + ) + def health(self, index=None, params=None, headers=None): + """ + Returns basic information about the health of the cluster. + ``_ + + :arg index: Limit the information returned to a specific index + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. 
Valid choices: open,
+ closed, hidden, none, all Default: all
+ :arg level: Specify the level of detail for returned information
+ Valid choices: cluster, indices, shards Default: cluster
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg timeout: Explicit operation timeout
+ :arg wait_for_active_shards: Wait until the specified number of
+ shards is active
+ :arg wait_for_events: Wait until all currently queued events
+ with the given priority are processed Valid choices: immediate, urgent,
+ high, normal, low, languid
+ :arg wait_for_no_initializing_shards: Whether to wait until
+ there are no initializing shards in the cluster
+ :arg wait_for_no_relocating_shards: Whether to wait until there
+ are no relocating shards in the cluster
+ :arg wait_for_nodes: Wait until the specified number of nodes is
+ available
+ :arg wait_for_status: Wait until cluster is in a specific state
+ Valid choices: green, yellow, red
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_cluster", "health", index),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("local", "master_timeout")
+ def pending_tasks(self, params=None, headers=None):
+ """
+ Returns a list of any cluster-level changes (e.g. create index, update mapping,
+ allocate or fail shard) which have not yet been executed.
+ ``_
+
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Specify timeout for connection to master
+ """
+ return self.transport.perform_request(
+ "GET", "/_cluster/pending_tasks", params=params, headers=headers
+ )
+
+ @query_params(
+ "allow_no_indices",
+ "expand_wildcards",
+ "flat_settings",
+ "ignore_unavailable",
+ "local",
+ "master_timeout",
+ "wait_for_metadata_version",
+ "wait_for_timeout",
+ )
+ def state(self, metric=None, index=None, params=None, headers=None):
+ """
+ Returns comprehensive information about the state of the cluster.
+ ``_
+
+ :arg metric: Limit the information returned to the specified
+ metrics Valid choices: _all, blocks, metadata, nodes, routing_table,
+ routing_nodes, master_node, version
+ :arg index: A comma-separated list of index names; use `_all` or
+ empty string to perform the operation on all indices
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both.
Valid choices: open,
+ closed, hidden, none, all Default: open
+ :arg flat_settings: Return settings in flat format (default:
+ false)
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Specify timeout for connection to master
+ :arg wait_for_metadata_version: Wait for the metadata version to
+ be equal to or greater than the specified metadata version
+ :arg wait_for_timeout: The maximum time to wait for
+ wait_for_metadata_version before timing out
+ """
+ if index and metric in SKIP_IN_PATH:
+ metric = "_all"
+
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_cluster", "state", metric, index),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("flat_settings", "timeout")
+ def stats(self, node_id=None, params=None, headers=None):
+ """
+ Returns a high-level overview of cluster statistics.
+ ``_
+
+ :arg node_id: A comma-separated list of node IDs or names to
+ limit the returned information; use `_local` to return information from
+ the node you're connecting to, leave empty to get information from all
+ nodes
+ :arg flat_settings: Return settings in flat format (default:
+ false)
+ :arg timeout: Explicit operation timeout
+ """
+ return self.transport.perform_request(
+ "GET",
+ "/_cluster/stats"
+ if node_id in SKIP_IN_PATH
+ else _make_path("_cluster", "stats", "nodes", node_id),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params(
+ "dry_run", "explain", "master_timeout", "metric", "retry_failed", "timeout"
+ )
+ def reroute(self, body=None, params=None, headers=None):
+ """
+ Allows manual changes to the allocation of individual shards in the cluster.
+ ``_
+
+ :arg body: The definition of `commands` to perform (`move`,
+ `cancel`, `allocate`)
+ :arg dry_run: Simulate the operation only and return the
+ resulting state
+ :arg explain: Return an explanation of why the commands can or
+ cannot be executed
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg metric: Limit the information returned to the specified
+ metrics. Defaults to all but metadata Valid choices: _all, blocks,
+ metadata, nodes, routing_table, master_node, version
+ :arg retry_failed: Retries allocation of shards that are blocked
+ due to too many subsequent allocation failures
+ :arg timeout: Explicit operation timeout
+ """
+ return self.transport.perform_request(
+ "POST", "/_cluster/reroute", params=params, headers=headers, body=body
+ )
+
+ @query_params("flat_settings", "include_defaults", "master_timeout", "timeout")
+ def get_settings(self, params=None, headers=None):
+ """
+ Returns cluster settings.
+ ``_
+
+ :arg flat_settings: Return settings in flat format (default:
+ false)
+ :arg include_defaults: Whether to return all default cluster
+ settings.
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg timeout: Explicit operation timeout
+ """
+ return self.transport.perform_request(
+ "GET", "/_cluster/settings", params=params, headers=headers
+ )
+
+ @query_params("flat_settings", "master_timeout", "timeout")
+ def put_settings(self, body, params=None, headers=None):
+ """
+ Updates the cluster settings.
+ ``_
+
+ :arg body: The settings to be updated. Can be either `transient`
+ or `persistent` (survives cluster restart).
+ :arg flat_settings: Return settings in flat format (default:
+ false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg timeout: Explicit operation timeout
+ """
+ if body in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'body'.")
+
+ return self.transport.perform_request(
+ "PUT", "/_cluster/settings", params=params, headers=headers, body=body
+ )
+
+ @query_params()
+ def remote_info(self, params=None, headers=None):
+ """
+ Returns information about the configured remote clusters.
+ ``_
+ """
+ return self.transport.perform_request(
+ "GET", "/_remote/info", params=params, headers=headers
+ )
+
+ @query_params("include_disk_info", "include_yes_decisions")
+ def allocation_explain(self, body=None, params=None, headers=None):
+ """
+ Provides explanations for shard allocations in the cluster.
+ ``_
+
+ :arg body: The index, shard, and primary flag to explain. Empty
+ means 'explain the first unassigned shard'
+ :arg include_disk_info: Return information about disk usage and
+ shard sizes (default: false)
+ :arg include_yes_decisions: Return 'YES' decisions in
+ explanation (default: false)
+ """
+ return self.transport.perform_request(
+ "POST",
+ "/_cluster/allocation/explain",
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params("master_timeout", "timeout")
+ def delete_component_template(self, name, params=None, headers=None):
+ """
+ Deletes a component template.
+ ``_
+
+ :arg name: The name of the template
+ :arg master_timeout: Specify timeout for connection to master
+ :arg timeout: Explicit operation timeout
+ """
+ if name in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'name'.")
+
+ return self.transport.perform_request(
+ "DELETE",
+ _make_path("_component_template", name),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("local", "master_timeout")
+ def get_component_template(self, name=None, params=None, headers=None):
+ """
+ Returns one or more component templates.
+ ``_
+
+ :arg name: The comma-separated names of the component templates
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_component_template", name),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("create", "master_timeout", "timeout")
+ def put_component_template(self, name, body, params=None, headers=None):
+ """
+ Creates or updates a component template.
+ ``_
+
+ :arg name: The name of the template
+ :arg body: The template definition
+ :arg create: Whether the index template should only be added if
+ new or can also replace an existing one
+ :arg master_timeout: Specify timeout for connection to master
+ :arg timeout: Explicit operation timeout
+ """
+ for param in (name, body):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "PUT",
+ _make_path("_component_template", name),
+ params=params,
+ headers=headers,
+ body=body,
+ )
diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/data_frame.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/data_frame.py
new file mode 100755
index 000000000..e7ff49a56
--- /dev/null
+++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/data_frame.py
@@ -0,0 +1,131 @@
+from .utils import NamespacedClient, query_params,
_make_path, SKIP_IN_PATH
+
+
+class Data_FrameClient(NamespacedClient):
+ @query_params()
+ def delete_data_frame_transform(self, transform_id, params=None, headers=None):
+ """
+ ``_
+
+ :arg transform_id: The id of the transform to delete
+ """
+ if transform_id in SKIP_IN_PATH:
+ raise ValueError(
+ "Empty value passed for a required argument 'transform_id'."
+ )
+ return self.transport.perform_request(
+ "DELETE",
+ _make_path("_data_frame", "transforms", transform_id),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("from_", "size")
+ def get_data_frame_transform(self, transform_id=None, params=None, headers=None):
+ """
+ ``_
+
+ :arg transform_id: The id or comma-delimited list of id expressions of
+ the transforms to get, '_all' or '*' implies get all transforms
+ :arg from_: skips a number of transform configs, defaults to 0
+ :arg size: specifies a max number of transforms to get, defaults to 100
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_data_frame", "transforms", transform_id),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params()
+ def get_data_frame_transform_stats(
+ self, transform_id=None, params=None, headers=None
+ ):
+ """
+ ``_
+
+ :arg transform_id: The id of the transform for which to get stats.
+ '_all' or '*' implies all transforms
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_data_frame", "transforms", transform_id, "_stats"),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params()
+ def preview_data_frame_transform(self, body, params=None, headers=None):
+ """
+ ``_
+
+ :arg body: The definition for the data_frame transform to preview
+ """
+ if body in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'body'.")
+ return self.transport.perform_request(
+ "POST",
+ "/_data_frame/transforms/_preview",
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def put_data_frame_transform(self, transform_id, body, params=None, headers=None):
+ """
+ ``_
+
+ :arg transform_id: The id of the new transform.
+ :arg body: The data frame transform definition
+ """
+ for param in (transform_id, body):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+ return self.transport.perform_request(
+ "PUT",
+ _make_path("_data_frame", "transforms", transform_id),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params("timeout")
+ def start_data_frame_transform(self, transform_id, params=None, headers=None):
+ """
+ ``_
+
+ :arg transform_id: The id of the transform to start
+ :arg timeout: Controls the time to wait for the transform to start
+ """
+ if transform_id in SKIP_IN_PATH:
+ raise ValueError(
+ "Empty value passed for a required argument 'transform_id'."
+ )
+ return self.transport.perform_request(
+ "POST",
+ _make_path("_data_frame", "transforms", transform_id, "_start"),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("timeout", "wait_for_completion")
+ def stop_data_frame_transform(self, transform_id, params=None, headers=None):
+ """
+ ``_
+
+ :arg transform_id: The id of the transform to stop
+ :arg timeout: Controls the time to wait until the transform has stopped.
+ Defaults to 30 seconds
+ :arg wait_for_completion: Whether to wait for the transform to fully
+ stop before returning or not. Defaults to false
+ """
+ if transform_id in SKIP_IN_PATH:
+ raise ValueError(
+ "Empty value passed for a required argument 'transform_id'."
+ )
+ return self.transport.perform_request(
+ "POST",
+ _make_path("_data_frame", "transforms", transform_id, "_stop"),
+ params=params,
+ headers=headers,
+ )
diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/deprecation.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/deprecation.py
new file mode 100755
index 000000000..8b0e9d70e
--- /dev/null
+++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/deprecation.py
@@ -0,0 +1,17 @@
+from .utils import NamespacedClient, query_params, _make_path
+
+
+class DeprecationClient(NamespacedClient):
+ @query_params()
+ def info(self, index=None, params=None, headers=None):
+ """
+ ``_
+
+ :arg index: Index pattern
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path(index, "_xpack", "migration", "deprecations"),
+ params=params,
+ headers=headers,
+ )
diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/enrich.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/enrich.py
new file mode 100755
index 000000000..2237a3d08
--- /dev/null
+++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/enrich.py
@@ -0,0 +1,85 @@
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class EnrichClient(NamespacedClient):
+ @query_params()
+ def delete_policy(self, name, params=None, headers=None):
+ """
+ Deletes an existing enrich policy and its enrich index.
+ ``_
+
+ :arg name: The name of the enrich policy
+ """
+ if name in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'name'.")
+
+ return self.transport.perform_request(
+ "DELETE",
+ _make_path("_enrich", "policy", name),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("wait_for_completion")
+ def execute_policy(self, name, params=None, headers=None):
+ """
+ Creates the enrich index for an existing enrich policy.
+ ``_
+
+ :arg name: The name of the enrich policy
+ :arg wait_for_completion: Whether the request should block until
+ the execution is complete. Default: True
+ """
+ if name in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'name'.")
+
+ return self.transport.perform_request(
+ "PUT",
+ _make_path("_enrich", "policy", name, "_execute"),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params()
+ def get_policy(self, name=None, params=None, headers=None):
+ """
+ Gets information about an enrich policy.
+ ``_
+
+ :arg name: A comma-separated list of enrich policy names
+ """
+ return self.transport.perform_request(
+ "GET", _make_path("_enrich", "policy", name), params=params, headers=headers
+ )
+
+ @query_params()
+ def put_policy(self, name, body, params=None, headers=None):
+ """
+ Creates a new enrich policy.
+ ``_
+
+ :arg name: The name of the enrich policy
+ :arg body: The enrich policy to register
+ """
+ for param in (name, body):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "PUT",
+ _make_path("_enrich", "policy", name),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def stats(self, params=None, headers=None):
+ """
+ Gets enrich coordinator statistics and information about enrich policies that
+ are currently executing.
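+
+ Usage sketch (hypothetical; assumes a configured ``Elasticsearch``
+ client ``es``)::
+
+     resp = es.enrich.stats()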
+ ``_ + """ + return self.transport.perform_request( + "GET", "/_enrich/_stats", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/eql.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/eql.py new file mode 100755 index 000000000..4562bfe91 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/eql.py @@ -0,0 +1,25 @@ +from .utils import NamespacedClient, SKIP_IN_PATH, query_params, _make_path + + +class EqlClient(NamespacedClient): + @query_params() + def search(self, index, body, params=None, headers=None): + """ + Returns results matching a query expressed in Event Query Language (EQL) + ``_ + + :arg index: The name of the index to scope the operation + :arg body: Eql request body. Use the `query` to limit the query + scope. + """ + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_eql", "search"), + params=params, + headers=headers, + body=body, + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/graph.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/graph.py new file mode 100755 index 000000000..d495ede60 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/graph.py @@ -0,0 +1,29 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class GraphClient(NamespacedClient): + @query_params("routing", "timeout") + def explore(self, index, body=None, doc_type=None, params=None, headers=None): + """ + Explore extracted and summarized information about the documents and terms in + an index. + ``_ + + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg body: Graph Query DSL + :arg doc_type: A comma-separated list of document types to + search; leave empty to perform the operation on all types + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_graph", "explore"), + params=params, + headers=headers, + body=body, + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ilm.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ilm.py new file mode 100755 index 000000000..bc8b70528 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ilm.py @@ -0,0 +1,158 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class IlmClient(NamespacedClient): + @query_params() + def delete_lifecycle(self, policy, params=None, headers=None): + """ + Deletes the specified lifecycle policy definition. A currently used policy + cannot be deleted. + ``_ + + :arg policy: The name of the index lifecycle policy + """ + if policy in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'policy'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ilm", "policy", policy), + params=params, + headers=headers, + ) + + @query_params("only_errors", "only_managed") + def explain_lifecycle(self, index, params=None, headers=None): + """ + Retrieves information about the index's current lifecycle state, such as the + currently executing phase, action, and step. 
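+
+ Usage sketch (hypothetical; assumes a configured ``Elasticsearch``
+ client ``es`` and an ILM-managed index)::
+
+     es.ilm.explain_lifecycle(index="my-index")  # placeholder index name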
+ ``_
+
+ :arg index: The name of the index to explain
+ :arg only_errors: filters the indices included in the response
+ to ones in an ILM error state, implies only_managed
+ :arg only_managed: filters the indices included in the response
+ to ones managed by ILM
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'index'.")
+
+ return self.transport.perform_request(
+ "GET", _make_path(index, "_ilm", "explain"), params=params, headers=headers
+ )
+
+ @query_params()
+ def get_lifecycle(self, policy=None, params=None, headers=None):
+ """
+ Returns the specified policy definition. Includes the policy version and last
+ modified date.
+ ``_
+
+ :arg policy: The name of the index lifecycle policy
+ """
+ return self.transport.perform_request(
+ "GET", _make_path("_ilm", "policy", policy), params=params, headers=headers
+ )
+
+ @query_params()
+ def get_status(self, params=None, headers=None):
+ """
+ Retrieves the current index lifecycle management (ILM) status.
+ ``_
+ """
+ return self.transport.perform_request(
+ "GET", "/_ilm/status", params=params, headers=headers
+ )
+
+ @query_params()
+ def move_to_step(self, index, body=None, params=None, headers=None):
+ """
+ Manually moves an index into the specified step and executes that step.
+ ``_
+
+ :arg index: The name of the index whose lifecycle step is to
+ change
+ :arg body: The new lifecycle step to move to
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'index'.")
+
+ return self.transport.perform_request(
+ "POST",
+ _make_path("_ilm", "move", index),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def put_lifecycle(self, policy, body=None, params=None, headers=None):
+ """
+ Creates a lifecycle policy.
+ ``_
+
+ :arg policy: The name of the index lifecycle policy
+ :arg body: The lifecycle policy definition to register
+ """
+ if policy in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'policy'.")
+
+ return self.transport.perform_request(
+ "PUT",
+ _make_path("_ilm", "policy", policy),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def remove_policy(self, index, params=None, headers=None):
+ """
+ Removes the assigned lifecycle policy and stops managing the specified index.
+ ``_
+
+ :arg index: The name of the index to remove the policy from
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'index'.")
+
+ return self.transport.perform_request(
+ "POST", _make_path(index, "_ilm", "remove"), params=params, headers=headers
+ )
+
+ @query_params()
+ def retry(self, index, params=None, headers=None):
+ """
+ Retries executing the policy for an index that is in the ERROR step.
+ ``_
+
+ :arg index: The name of the indices (comma-separated) whose
+ failed lifecycle step is to be retried
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'index'.")
+
+ return self.transport.perform_request(
+ "POST", _make_path(index, "_ilm", "retry"), params=params, headers=headers
+ )
+
+ @query_params()
+ def start(self, params=None, headers=None):
+ """
+ Starts the index lifecycle management (ILM) plugin.
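+
+ Usage sketch (hypothetical; assumes a configured ``Elasticsearch``
+ client ``es``)::
+
+     es.ilm.start()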
+ ``_
+ """
+ return self.transport.perform_request(
+ "POST", "/_ilm/start", params=params, headers=headers
+ )
+
+ @query_params()
+ def stop(self, params=None, headers=None):
+ """
+ Halts all lifecycle management operations and stops the index lifecycle
+ management (ILM) plugin.
+ ``_
+ """
+ return self.transport.perform_request(
+ "POST", "/_ilm/stop", params=params, headers=headers
+ )
diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/indices.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/indices.py
new file mode 100755
index 000000000..e5816cf43
--- /dev/null
+++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/indices.py
@@ -0,0 +1,1316 @@
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class IndicesClient(NamespacedClient):
+ @query_params()
+ def analyze(self, body=None, index=None, params=None, headers=None):
+ """
+ Performs the analysis process on a text and returns the token breakdown of the
+ text.
+ ``_
+
+ :arg body: Define analyzer/tokenizer parameters and the text on
+ which the analysis should be performed
+ :arg index: The name of the index to scope the operation
+ """
+ return self.transport.perform_request(
+ "POST",
+ _make_path(index, "_analyze"),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+ def refresh(self, index=None, params=None, headers=None):
+ """
+ Performs the refresh operation on one or more indices.
+ ``_
+
+ :arg index: A comma-separated list of index names; use `_all` or
+ empty string to perform the operation on all indices
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both. Valid choices: open,
+ closed, hidden, none, all Default: open
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ """
+ return self.transport.perform_request(
+ "POST", _make_path(index, "_refresh"), params=params, headers=headers
+ )
+
+ @query_params(
+ "allow_no_indices",
+ "expand_wildcards",
+ "force",
+ "ignore_unavailable",
+ "wait_if_ongoing",
+ )
+ def flush(self, index=None, params=None, headers=None):
+ """
+ Performs the flush operation on one or more indices.
+ ``_
+
+ :arg index: A comma-separated list of index names; use `_all` or
+ empty string for all indices
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both. Valid choices: open,
+ closed, hidden, none, all Default: open
+ :arg force: Whether a flush should be forced even if it is not
+ necessarily needed, i.e. if no changes will be committed to the index.
+ This is useful if transaction log IDs should be incremented even if no
+ uncommitted changes are present. (This setting can be considered as
+ internal)
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ :arg wait_if_ongoing: If set to true the flush operation will
+ block until the flush can be executed if another flush operation is
+ already executing. The default is true.
If set to false the flush will
+ be skipped if another flush operation is already running.
+ """
+ return self.transport.perform_request(
+ "POST", _make_path(index, "_flush"), params=params, headers=headers
+ )
+
+ @query_params(
+ "include_type_name", "master_timeout", "timeout", "wait_for_active_shards"
+ )
+ def create(self, index, body=None, params=None, headers=None):
+ """
+ Creates an index with optional settings and mappings.
+ ``_
+
+ :arg index: The name of the index
+ :arg body: The configuration for the index (`settings` and
+ `mappings`)
+ :arg include_type_name: Whether a type should be expected in the
+ body of the mappings.
+ :arg master_timeout: Specify timeout for connection to master
+ :arg timeout: Explicit operation timeout
+ :arg wait_for_active_shards: Set the number of active shards to
+ wait for before the operation returns.
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'index'.")
+
+ return self.transport.perform_request(
+ "PUT", _make_path(index), params=params, headers=headers, body=body
+ )
+
+ @query_params("master_timeout", "timeout", "wait_for_active_shards")
+ def clone(self, index, target, body=None, params=None, headers=None):
+ """
+ Clones an index.
+ ``_
+
+ :arg index: The name of the source index to clone
+ :arg target: The name of the target index to clone into
+ :arg body: The configuration for the target index (`settings`
+ and `aliases`)
+ :arg master_timeout: Specify timeout for connection to master
+ :arg timeout: Explicit operation timeout
+ :arg wait_for_active_shards: Set the number of active shards to
+ wait for on the cloned index before the operation returns.
+ """
+ for param in (index, target):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "PUT",
+ _make_path(index, "_clone", target),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params(
+ "allow_no_indices",
+ "expand_wildcards",
+ "flat_settings",
+ "ignore_unavailable",
+ "include_defaults",
+ "include_type_name",
+ "local",
+ "master_timeout",
+ )
+ def get(self, index, params=None, headers=None):
+ """
+ Returns information about one or more indices.
+ ``_
+
+ :arg index: A comma-separated list of index names
+ :arg allow_no_indices: Ignore if a wildcard expression resolves
+ to no concrete indices (default: false)
+ :arg expand_wildcards: Whether wildcard expressions should get
+ expanded to open or closed indices (default: open) Valid choices: open,
+ closed, hidden, none, all Default: open
+ :arg flat_settings: Return settings in flat format (default:
+ false)
+ :arg ignore_unavailable: Ignore unavailable indexes (default:
+ false)
+ :arg include_defaults: Whether to return all default settings for
+ each of the indices.
+ :arg include_type_name: Whether to add the type name to the
+ response (default: false)
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Specify timeout for connection to master
+ """
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'index'.")
+
+ return self.transport.perform_request(
+ "GET", _make_path(index), params=params, headers=headers
+ )
+
+ @query_params(
+ "allow_no_indices",
+ "expand_wildcards",
+ "ignore_unavailable",
+ "master_timeout",
+ "timeout",
+ "wait_for_active_shards",
+ )
+ def open(self, index, params=None, headers=None):
+ """
+ Opens an index.
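+
+ Usage sketch (hypothetical; assumes a configured ``Elasticsearch``
+ client ``es`` and a previously closed index)::
+
+     es.indices.open(index="my-index")  # placeholder index name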
+ ``_ + + :arg index: A comma separated list of indices to open + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: closed + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of active shards to + wait for before the operation returns. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", _make_path(index, "_open"), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "master_timeout", + "timeout", + "wait_for_active_shards", + ) + def close(self, index, params=None, headers=None): + """ + Closes an index. + ``_ + + :arg index: A comma separated list of indices to close + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of active shards to + wait for before the operation returns. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", _make_path(index, "_close"), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "master_timeout", + "timeout", + ) + def delete(self, index, params=None, headers=None): + """ + Deletes an index. + ``_ + + :arg index: A comma-separated list of indices to delete; use + `_all` or `*` string to delete all indices + :arg allow_no_indices: Ignore if a wildcard expression resolves + to no concrete indices (default: false) + :arg expand_wildcards: Whether wildcard expressions should get + expanded to open or closed indices (default: open) Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Ignore unavailable indexes (default: + false) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "DELETE", _make_path(index), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "flat_settings", + "ignore_unavailable", + "include_defaults", + "local", + ) + def exists(self, index, params=None, headers=None): + """ + Returns information about whether a particular index exists. 
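+
+ Usage sketch (hypothetical; assumes a configured ``Elasticsearch``
+ client ``es``)::
+
+     if es.indices.exists(index="my-index"):  # placeholder index name
+         print("index exists")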
+ ``_ + + :arg index: A comma-separated list of index names + :arg allow_no_indices: Ignore if a wildcard expression resolves + to no concrete indices (default: false) + :arg expand_wildcards: Whether wildcard expressions should get + expanded to open or closed indices (default: open) Valid choices: open, + closed, hidden, none, all Default: open + :arg flat_settings: Return settings in flat format (default: + false) + :arg ignore_unavailable: Ignore unavailable indexes (default: + false) + :arg include_defaults: Whether to return all default setting for + each of the indices. + :arg local: Return local information, do not retrieve the state + from master node (default: false) + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "HEAD", _make_path(index), params=params, headers=headers + ) + + @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local") + def exists_type(self, index, doc_type, params=None, headers=None): + """ + Returns information about whether a particular document type exists. + (DEPRECATED) + ``_ + + :arg index: A comma-separated list of index names; use `_all` to + check the types across all indices + :arg doc_type: A comma-separated list of document types to check + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + """ + for param in (index, doc_type): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "HEAD", + _make_path(index, "_mapping", doc_type), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "include_type_name", + "master_timeout", + "timeout", + ) + def put_mapping(self, body, index=None, doc_type=None, params=None, headers=None): + """ + Updates the index mappings. + ``_ + + :arg body: The mapping definition + :arg index: A comma-separated list of index names the mapping + should be added to (supports wildcards); use `_all` or omit to add the + mapping on all indices. + :arg doc_type: The name of the document type + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg include_type_name: Whether a type should be expected in the + body of the mappings. 
+ :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + if doc_type not in SKIP_IN_PATH and index in SKIP_IN_PATH: + index = "_all" + + return self.transport.perform_request( + "PUT", + _make_path(index, doc_type, "_mapping"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "include_type_name", + "local", + "master_timeout", + ) + def get_mapping(self, index=None, doc_type=None, params=None, headers=None): + """ + Returns mappings for one or more indices. + ``_ + + :arg index: A comma-separated list of index names + :arg doc_type: A comma-separated list of document types + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg include_type_name: Whether to add the type name to the + response (default: false) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Specify timeout for connection to master + """ + return self.transport.perform_request( + "GET", + _make_path(index, "_mapping", doc_type), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "include_defaults", + "include_type_name", + "local", + ) + def get_field_mapping( + self, fields, index=None, doc_type=None, params=None, headers=None + ): + """ + Returns mapping for one or more fields. + ``_ + + :arg fields: A comma-separated list of fields + :arg index: A comma-separated list of index names + :arg doc_type: A comma-separated list of document types + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg include_defaults: Whether the default mapping values should + be returned as well + :arg include_type_name: Whether a type should be returned in the + body of the mappings. + :arg local: Return local information, do not retrieve the state + from master node (default: false) + """ + if fields in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'fields'.") + + return self.transport.perform_request( + "GET", + _make_path(index, "_mapping", doc_type, "field", fields), + params=params, + headers=headers, + ) + + @query_params("master_timeout", "timeout") + def put_alias(self, index, name, body=None, params=None, headers=None): + """ + Creates or updates an alias. + ``_ + + :arg index: A comma-separated list of index names the alias + should point to (supports wildcards); use `_all` to perform the + operation on all indices. 
+ :arg name: The name of the alias to be created or updated
+ :arg body: The settings for the alias, such as `routing` or
+ `filter`
+ :arg master_timeout: Specify timeout for connection to master
+ :arg timeout: Explicit operation timeout
+ """
+ for param in (index, name):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "PUT",
+ _make_path(index, "_alias", name),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
+ def exists_alias(self, name, index=None, params=None, headers=None):
+ """
+ Returns information about whether a particular alias exists.
+ ``_
+
+ :arg name: A comma-separated list of alias names to return
+ :arg index: A comma-separated list of index names to filter
+ aliases
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both. Valid choices: open,
+ closed, hidden, none, all Default: all
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ """
+ if name in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'name'.")
+
+ return self.transport.perform_request(
+ "HEAD", _make_path(index, "_alias", name), params=params, headers=headers
+ )
+
+ @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local")
+ def get_alias(self, index=None, name=None, params=None, headers=None):
+ """
+ Returns an alias.
+ ``_
+
+ :arg index: A comma-separated list of index names to filter
+ aliases
+ :arg name: A comma-separated list of alias names to return
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both. Valid choices: open,
+ closed, hidden, none, all Default: all
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ """
+ return self.transport.perform_request(
+ "GET", _make_path(index, "_alias", name), params=params, headers=headers
+ )
+
+ @query_params("master_timeout", "timeout")
+ def update_aliases(self, body, params=None, headers=None):
+ """
+ Updates index aliases.
+ ``_
+
+ :arg body: The definition of `actions` to perform
+ :arg master_timeout: Specify timeout for connection to master
+ :arg timeout: Request timeout
+ """
+ if body in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'body'.")
+
+ return self.transport.perform_request(
+ "POST", "/_aliases", params=params, headers=headers, body=body
+ )
+
+ @query_params("master_timeout", "timeout")
+ def delete_alias(self, index, name, params=None, headers=None):
+ """
+ Deletes an alias.
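+ A minimal usage sketch (``es``, the index pattern, and the alias
+ name are hypothetical)::
+
+     es.indices.delete_alias(index="logs-*", name="logs-current")  # hypothetical names
+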
+ ``_
+
+ :arg index: A comma-separated list of index names (supports
+ wildcards); use `_all` for all indices
+ :arg name: A comma-separated list of aliases to delete (supports
+ wildcards); use `_all` to delete all aliases for the specified indices.
+ :arg master_timeout: Specify timeout for connection to master
+ :arg timeout: Explicit operation timeout
+ """
+ for param in (index, name):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "DELETE", _make_path(index, "_alias", name), params=params, headers=headers
+ )
+
+ @query_params("create", "include_type_name", "master_timeout", "order")
+ def put_template(self, name, body, params=None, headers=None):
+ """
+ Creates or updates an index template.
+ ``_
+
+ :arg name: The name of the template
+ :arg body: The template definition
+ :arg create: Whether the index template should only be added if
+ new or can also replace an existing one
+ :arg include_type_name: Whether a type should be returned in the
+ body of the mappings.
+ :arg master_timeout: Specify timeout for connection to master
+ :arg order: The order for this template when merging multiple
+ matching ones (higher numbers are merged later, overriding the lower
+ numbers)
+ """
+ for param in (name, body):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "PUT",
+ _make_path("_template", name),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params("flat_settings", "local", "master_timeout")
+ def exists_template(self, name, params=None, headers=None):
+ """
+ Returns information about whether a particular index template exists.
+ ``_
+
+ :arg name: A comma-separated list of index template names
+ :arg flat_settings: Return settings in flat format (default:
+ false)
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ """
+ if name in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'name'.")
+
+ return self.transport.perform_request(
+ "HEAD", _make_path("_template", name), params=params, headers=headers
+ )
+
+ @query_params("flat_settings", "include_type_name", "local", "master_timeout")
+ def get_template(self, name=None, params=None, headers=None):
+ """
+ Returns an index template.
+ ``_
+
+ :arg name: A comma-separated list of index template names
+ :arg flat_settings: Return settings in flat format (default:
+ false)
+ :arg include_type_name: Whether a type should be returned in the
+ body of the mappings.
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ """
+ return self.transport.perform_request(
+ "GET", _make_path("_template", name), params=params, headers=headers
+ )
+
+ @query_params("master_timeout", "timeout")
+ def delete_template(self, name, params=None, headers=None):
+ """
+ Deletes an index template.
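+ A minimal usage sketch (``es`` and the template name are
+ hypothetical)::
+
+     es.indices.delete_template("old-logs-template")  # hypothetical name
+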
+ ``_ + + :arg name: The name of the template + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", _make_path("_template", name), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "flat_settings", + "ignore_unavailable", + "include_defaults", + "local", + "master_timeout", + ) + def get_settings(self, index=None, name=None, params=None, headers=None): + """ + Returns settings for one or more indices. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or + empty string to perform the operation on all indices + :arg name: The name of the settings that should be included + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: all + :arg flat_settings: Return settings in flat format (default: + false) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg include_defaults: Whether to return all default setting for + each of the indices. + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Specify timeout for connection to master + """ + return self.transport.perform_request( + "GET", _make_path(index, "_settings", name), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "flat_settings", + "ignore_unavailable", + "master_timeout", + "preserve_existing", + "timeout", + ) + def put_settings(self, body, index=None, params=None, headers=None): + """ + Updates the index settings. + ``_ + + :arg body: The index settings to be updated + :arg index: A comma-separated list of index names; use `_all` or + empty string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg flat_settings: Return settings in flat format (default: + false) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg preserve_existing: Whether to update existing settings. 
If + set to `true` existing settings on an index remain unchanged, the + default is `false` + :arg timeout: Explicit operation timeout + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "PUT", + _make_path(index, "_settings"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "completion_fields", + "expand_wildcards", + "fielddata_fields", + "fields", + "forbid_closed_indices", + "groups", + "include_segment_file_sizes", + "include_unloaded_segments", + "level", + "types", + ) + def stats(self, index=None, metric=None, params=None, headers=None): + """ + Provides statistics on operations happening in an index. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or + empty string to perform the operation on all indices + :arg metric: Limit the information returned the specific + metrics. Valid choices: _all, completion, docs, fielddata, query_cache, + flush, get, indexing, merge, request_cache, refresh, search, segments, + store, warmer, suggest + :arg completion_fields: A comma-separated list of fields for + `fielddata` and `suggest` index metric (supports wildcards) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg fielddata_fields: A comma-separated list of fields for + `fielddata` index metric (supports wildcards) + :arg fields: A comma-separated list of fields for `fielddata` + and `completion` index metric (supports wildcards) + :arg forbid_closed_indices: If set to false stats will also + collected from closed indices if explicitly specified or if + expand_wildcards expands to closed indices Default: True + :arg groups: A comma-separated list of search groups for + `search` index metric + :arg include_segment_file_sizes: Whether to report the + aggregated disk usage of each one of the Lucene index files (only + applies if segment stats are requested) + :arg include_unloaded_segments: If set to true segment stats + will include stats for segments that are not currently loaded into + memory + :arg level: Return stats aggregated at cluster, index or shard + level Valid choices: cluster, indices, shards Default: indices + :arg types: A comma-separated list of document types for the + `indexing` index metric + """ + return self.transport.perform_request( + "GET", _make_path(index, "_stats", metric), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", "expand_wildcards", "ignore_unavailable", "verbose" + ) + def segments(self, index=None, params=None, headers=None): + """ + Provides low-level information about segments in a Lucene index. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or + empty string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg verbose: Includes detailed memory usage by Lucene. 
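+
+ A minimal usage sketch (``es`` is an assumed ``Elasticsearch``
+ client instance; the index name is hypothetical)::
+
+     es.indices.segments(index="my-index", verbose=True)  # hypothetical name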
+ """ + return self.transport.perform_request( + "GET", _make_path(index, "_segments"), params=params, headers=headers + ) + + @query_params( + "all_shards", + "allow_no_indices", + "analyze_wildcard", + "analyzer", + "default_operator", + "df", + "expand_wildcards", + "explain", + "ignore_unavailable", + "lenient", + "q", + "rewrite", + ) + def validate_query( + self, body=None, index=None, doc_type=None, params=None, headers=None + ): + """ + Allows a user to validate a potentially expensive query without executing it. + ``_ + + :arg body: The query definition specified with the Query DSL + :arg index: A comma-separated list of index names to restrict + the operation; use `_all` or empty string to perform the operation on + all indices + :arg doc_type: A comma-separated list of document types to + restrict the operation; leave empty to perform the operation on all + types + :arg all_shards: Execute validation on all shards instead of one + random shard per index + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg explain: Return detailed information about the error + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg q: Query in the Lucene query string syntax + :arg rewrite: Provide a more detailed explanation showing the + actual Lucene query that will be executed. + """ + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_validate", "query"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "fielddata", + "fields", + "ignore_unavailable", + "query", + "request", + ) + def clear_cache(self, index=None, params=None, headers=None): + """ + Clears all or specific caches for one or more indices. + ``_ + + :arg index: A comma-separated list of index name to limit the + operation + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. 
Valid choices: open,
+ closed, hidden, none, all Default: open
+ :arg fielddata: Clear field data
+ :arg fields: A comma-separated list of fields to clear when
+ using the `fielddata` parameter (default: all)
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ :arg query: Clear query caches
+ :arg request: Clear request cache
+ """
+ return self.transport.perform_request(
+ "POST", _make_path(index, "_cache", "clear"), params=params, headers=headers
+ )
+
+ @query_params("active_only", "detailed")
+ def recovery(self, index=None, params=None, headers=None):
+ """
+ Returns information about ongoing index shard recoveries.
+ ``_
+
+ :arg index: A comma-separated list of index names; use `_all` or
+ empty string to perform the operation on all indices
+ :arg active_only: Display only those recoveries that are
+ currently ongoing
+ :arg detailed: Whether to display detailed information about
+ shard recovery
+ """
+ return self.transport.perform_request(
+ "GET", _make_path(index, "_recovery"), params=params, headers=headers
+ )
+
+ @query_params(
+ "allow_no_indices",
+ "expand_wildcards",
+ "ignore_unavailable",
+ "only_ancient_segments",
+ "wait_for_completion",
+ )
+ def upgrade(self, index=None, params=None, headers=None):
+ """
+ The _upgrade API is no longer useful and will be removed.
+ ``_
+
+ :arg index: A comma-separated list of index names; use `_all` or
+ empty string to perform the operation on all indices
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both. Valid choices: open,
+ closed, hidden, none, all Default: open
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ :arg only_ancient_segments: If true, only ancient (an older
+ Lucene major release) segments will be upgraded
+ :arg wait_for_completion: Specify whether the request should
+ block until all segments are upgraded (default: false)
+ """
+ return self.transport.perform_request(
+ "POST", _make_path(index, "_upgrade"), params=params, headers=headers
+ )
+
+ @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+ def get_upgrade(self, index=None, params=None, headers=None):
+ """
+ The _upgrade API is no longer useful and will be removed.
+ ``_
+
+ :arg index: A comma-separated list of index names; use `_all` or
+ empty string to perform the operation on all indices
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both. Valid choices: open,
+ closed, hidden, none, all Default: open
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ """
+ return self.transport.perform_request(
+ "GET", _make_path(index, "_upgrade"), params=params, headers=headers
+ )
+
+ @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+ def flush_synced(self, index=None, params=None, headers=None):
+ """
+ Performs a synced flush operation on one or more indices.
Synced flush is
+ deprecated and will be removed in 8.0. Use flush instead.
+ ``_
+
+ :arg index: A comma-separated list of index names; use `_all` or
+ empty string for all indices
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both. Valid choices: open,
+ closed, none, all Default: open
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ """
+ return self.transport.perform_request(
+ "POST",
+ _make_path(index, "_flush", "synced"),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params(
+ "allow_no_indices", "expand_wildcards", "ignore_unavailable", "status"
+ )
+ def shard_stores(self, index=None, params=None, headers=None):
+ """
+ Provides store information for shard copies of indices.
+ ``_
+
+ :arg index: A comma-separated list of index names; use `_all` or
+ empty string to perform the operation on all indices
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both. Valid choices: open,
+ closed, hidden, none, all Default: open
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ :arg status: A comma-separated list of statuses used to filter
+ on shards to get store information for Valid choices: green, yellow,
+ red, all
+ """
+ return self.transport.perform_request(
+ "GET", _make_path(index, "_shard_stores"), params=params, headers=headers
+ )
+
+ @query_params(
+ "allow_no_indices",
+ "expand_wildcards",
+ "flush",
+ "ignore_unavailable",
+ "max_num_segments",
+ "only_expunge_deletes",
+ )
+ def forcemerge(self, index=None, params=None, headers=None):
+ """
+ Performs the force merge operation on one or more indices.
+ ``_
+
+ :arg index: A comma-separated list of index names; use `_all` or
+ empty string to perform the operation on all indices
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both. Valid choices: open,
+ closed, hidden, none, all Default: open
+ :arg flush: Specify whether the index should be flushed after
+ performing the operation (default: true)
+ :arg ignore_unavailable: Whether specified concrete indices
+ should be ignored when unavailable (missing or closed)
+ :arg max_num_segments: The number of segments the index should
+ be merged into (default: dynamic)
+ :arg only_expunge_deletes: Specify whether the operation should
+ only expunge deleted documents
+ """
+ return self.transport.perform_request(
+ "POST", _make_path(index, "_forcemerge"), params=params, headers=headers
+ )
+
+ @query_params(
+ "copy_settings", "master_timeout", "timeout", "wait_for_active_shards"
+ )
+ def shrink(self, index, target, body=None, params=None, headers=None):
+ """
+ Allows you to shrink an existing index into a new index with fewer primary shards.
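+ A minimal usage sketch (``es`` and the index names are hypothetical;
+ the body shown is an assumed one-shard target configuration)::
+
+     es.indices.shrink(
+         index="logs-v1",  # hypothetical source index
+         target="logs-v1-shrunk",  # hypothetical target index
+         body={"settings": {"index.number_of_shards": 1}},
+     )
+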
+ ``_
+
+ :arg index: The name of the source index to shrink
+ :arg target: The name of the target index to shrink into
+ :arg body: The configuration for the target index (`settings`
+ and `aliases`)
+ :arg copy_settings: whether or not to copy settings from the
+ source index (defaults to false)
+ :arg master_timeout: Specify timeout for connection to master
+ :arg timeout: Explicit operation timeout
+ :arg wait_for_active_shards: Set the number of active shards to
+ wait for on the shrunken index before the operation returns.
+ """
+ for param in (index, target):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "PUT",
+ _make_path(index, "_shrink", target),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params(
+ "copy_settings", "master_timeout", "timeout", "wait_for_active_shards"
+ )
+ def split(self, index, target, body=None, params=None, headers=None):
+ """
+ Allows you to split an existing index into a new index with more primary
+ shards.
+ ``_
+
+ :arg index: The name of the source index to split
+ :arg target: The name of the target index to split into
+ :arg body: The configuration for the target index (`settings`
+ and `aliases`)
+ :arg copy_settings: whether or not to copy settings from the
+ source index (defaults to false)
+ :arg master_timeout: Specify timeout for connection to master
+ :arg timeout: Explicit operation timeout
+ :arg wait_for_active_shards: Set the number of active shards to
+ wait for on the split index before the operation returns.
+ """
+ for param in (index, target):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "PUT",
+ _make_path(index, "_split", target),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params(
+ "dry_run",
+ "include_type_name",
+ "master_timeout",
+ "timeout",
+ "wait_for_active_shards",
+ )
+ def rollover(self, alias, body=None, new_index=None, params=None, headers=None):
+ """
+ Updates an alias to point to a new index when the existing index is considered
+ to be too large or too old.
+ ``_
+
+ :arg alias: The name of the alias to rollover
+ :arg body: The conditions that need to be met for executing
+ rollover
+ :arg new_index: The name of the rollover index
+ :arg dry_run: If set to true the rollover action will only be
+ validated but not actually performed even if a condition matches. The
+ default is false
+ :arg include_type_name: Whether a type should be included in the
+ body of the mappings.
+ :arg master_timeout: Specify timeout for connection to master
+ :arg timeout: Explicit operation timeout
+ :arg wait_for_active_shards: Set the number of active shards to
+ wait for on the newly created rollover index before the operation
+ returns.
+ """
+ if alias in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'alias'.")
+
+ return self.transport.perform_request(
+ "POST",
+ _make_path(alias, "_rollover", new_index),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params(
+ "allow_no_indices",
+ "expand_wildcards",
+ "ignore_unavailable",
+ "master_timeout",
+ "timeout",
+ "wait_for_active_shards",
+ )
+ def freeze(self, index, params=None, headers=None):
+ """
+ Freezes an index. A frozen index has almost no overhead on the cluster (except
+ for maintaining its metadata in memory) and is read-only.
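+ A minimal usage sketch (``es`` and the index name are hypothetical)::
+
+     es.indices.freeze("logs-2021")  # hypothetical name
+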
+ ``_ + + :arg index: The name of the index to freeze + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: closed + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of active shards to + wait for before the operation returns. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", _make_path(index, "_freeze"), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "master_timeout", + "timeout", + "wait_for_active_shards", + ) + def unfreeze(self, index, params=None, headers=None): + """ + Unfreezes an index. When a frozen index is unfrozen, the index goes through the + normal recovery process and becomes writeable again. + ``_ + + :arg index: The name of the index to unfreeze + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: closed + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of active shards to + wait for before the operation returns. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", _make_path(index, "_unfreeze"), params=params, headers=headers + ) + + @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") + def reload_search_analyzers(self, index, params=None, headers=None): + """ + Reloads an index's search analyzers and their resources. + ``_ + + :arg index: A comma-separated list of index names to reload + analyzers for + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. 
Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "GET", + _make_path(index, "_reload_search_analyzers"), + params=params, + headers=headers, + ) + + @query_params() + def create_data_stream(self, name, body, params=None, headers=None): + """ + Creates or updates a data stream + ``_ + + :arg name: The name of the data stream + :arg body: The data stream definition + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_data_stream", name), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def delete_data_stream(self, name, params=None, headers=None): + """ + Deletes a data stream. + ``_ + + :arg name: The name of the data stream + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", _make_path("_data_stream", name), params=params, headers=headers + ) + + @query_params() + def get_data_streams(self, name=None, params=None, headers=None): + """ + Returns data streams. + ``_ + + :arg name: The comma separated names of data streams + """ + return self.transport.perform_request( + "GET", _make_path("_data_streams", name), params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ingest.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ingest.py new file mode 100755 index 000000000..40fd7a209 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ingest.py @@ -0,0 +1,95 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class IngestClient(NamespacedClient): + @query_params("master_timeout") + def get_pipeline(self, id=None, params=None, headers=None): + """ + Returns a pipeline. + ``_ + + :arg id: Comma separated list of pipeline ids. Wildcards + supported + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + return self.transport.perform_request( + "GET", _make_path("_ingest", "pipeline", id), params=params, headers=headers + ) + + @query_params("master_timeout", "timeout") + def put_pipeline(self, id, body, params=None, headers=None): + """ + Creates or updates a pipeline. + ``_ + + :arg id: Pipeline ID + :arg body: The ingest definition + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + """ + for param in (id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ingest", "pipeline", id), + params=params, + headers=headers, + body=body, + ) + + @query_params("master_timeout", "timeout") + def delete_pipeline(self, id, params=None, headers=None): + """ + Deletes a pipeline. 
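+ A minimal usage sketch (``es`` and the pipeline ID are hypothetical)::
+
+     es.ingest.delete_pipeline("daily-log-enrichment")  # hypothetical ID
+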
+ ``_ + + :arg id: Pipeline ID + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ingest", "pipeline", id), + params=params, + headers=headers, + ) + + @query_params("verbose") + def simulate(self, body, id=None, params=None, headers=None): + """ + Allows to simulate a pipeline with example documents. + ``_ + + :arg body: The simulate definition + :arg id: Pipeline ID + :arg verbose: Verbose mode. Display data output for each + processor in executed pipeline + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + _make_path("_ingest", "pipeline", id, "_simulate"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def processor_grok(self, params=None, headers=None): + """ + Returns a list of the built-in patterns. + ``_ + """ + return self.transport.perform_request( + "GET", "/_ingest/processor/grok", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/license.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/license.py new file mode 100755 index 000000000..1564c5337 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/license.py @@ -0,0 +1,94 @@ +from .utils import NamespacedClient, query_params + + +class LicenseClient(NamespacedClient): + @query_params() + def delete(self, params=None, headers=None): + """ + Deletes licensing information for the cluster + ``_ + """ + return self.transport.perform_request( + "DELETE", "/_license", params=params, headers=headers + ) + + @query_params("accept_enterprise", "local") + def get(self, params=None, headers=None): + """ + Retrieves licensing information for the cluster + ``_ + + :arg accept_enterprise: If the active license is an enterprise + license, return type as 'enterprise' (default: false) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + """ + return self.transport.perform_request( + "GET", "/_license", params=params, headers=headers + ) + + @query_params() + def get_basic_status(self, params=None, headers=None): + """ + Retrieves information about the status of the basic license. + ``_ + """ + return self.transport.perform_request( + "GET", "/_license/basic_status", params=params, headers=headers + ) + + @query_params() + def get_trial_status(self, params=None, headers=None): + """ + Retrieves information about the status of the trial license. + ``_ + """ + return self.transport.perform_request( + "GET", "/_license/trial_status", params=params, headers=headers + ) + + @query_params("acknowledge") + def post(self, body=None, params=None, headers=None): + """ + Updates the license for the cluster. + ``_ + + :arg body: licenses to be installed + :arg acknowledge: whether the user has acknowledged acknowledge + messages (default: false) + """ + return self.transport.perform_request( + "PUT", "/_license", params=params, headers=headers, body=body + ) + + @query_params("acknowledge") + def post_start_basic(self, params=None, headers=None): + """ + Starts an indefinite basic license. 
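+ A minimal usage sketch (``es`` is an assumed ``Elasticsearch``
+ client instance)::
+
+     es.license.post_start_basic(acknowledge=True)
+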
+ ``_ + + :arg acknowledge: whether the user has acknowledged acknowledge + messages (default: false) + """ + return self.transport.perform_request( + "POST", "/_license/start_basic", params=params, headers=headers + ) + + @query_params("acknowledge", "doc_type") + def post_start_trial(self, params=None, headers=None): + """ + starts a limited time trial license. + ``_ + + :arg acknowledge: whether the user has acknowledged acknowledge + messages (default: false) + :arg doc_type: The type of trial license to generate (default: + "trial") + """ + # type is a reserved word so it cannot be used, use doc_type instead + if "doc_type" in params: + params["type"] = params.pop("doc_type") + + return self.transport.perform_request( + "POST", "/_license/start_trial", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/migration.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/migration.py new file mode 100755 index 000000000..ebe9a97aa --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/migration.py @@ -0,0 +1,20 @@ +from .utils import NamespacedClient, query_params, _make_path + + +class MigrationClient(NamespacedClient): + @query_params() + def deprecations(self, index=None, params=None, headers=None): + """ + Retrieves information about different cluster, node, and index level settings + that use deprecated features that will be removed or changed in the next major + version. + ``_ + + :arg index: Index pattern + """ + return self.transport.perform_request( + "GET", + _make_path(index, "_migration", "deprecations"), + params=params, + headers=headers, + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ml.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ml.py new file mode 100755 index 000000000..d62546642 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ml.py @@ -0,0 +1,1478 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body + + +class MlClient(NamespacedClient): + @query_params("allow_no_jobs", "force", "timeout") + def close_job(self, job_id, body=None, params=None, headers=None): + """ + Closes one or more anomaly detection jobs. A job can be opened and closed + multiple times throughout its lifecycle. + ``_ + + :arg job_id: The name of the job to close + :arg body: The URL params optionally sent in the body + :arg allow_no_jobs: Whether to ignore if a wildcard expression + matches no jobs. (This includes `_all` string or when no jobs have been + specified) + :arg force: True if the job should be forcefully closed + :arg timeout: Controls the time to wait until a job has closed. + Default to 30 minutes + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "_close"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def delete_calendar(self, calendar_id, params=None, headers=None): + """ + Deletes a calendar. + ``_ + + :arg calendar_id: The ID of the calendar to delete + """ + if calendar_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'calendar_id'." 
+ ) + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "calendars", calendar_id), + params=params, + headers=headers, + ) + + @query_params() + def delete_calendar_event(self, calendar_id, event_id, params=None, headers=None): + """ + Deletes scheduled events from a calendar. + ``_ + + :arg calendar_id: The ID of the calendar to modify + :arg event_id: The ID of the event to remove from the calendar + """ + for param in (calendar_id, event_id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "calendars", calendar_id, "events", event_id), + params=params, + headers=headers, + ) + + @query_params() + def delete_calendar_job(self, calendar_id, job_id, params=None, headers=None): + """ + Deletes anomaly detection jobs from a calendar. + ``_ + + :arg calendar_id: The ID of the calendar to modify + :arg job_id: The ID of the job to remove from the calendar + """ + for param in (calendar_id, job_id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "calendars", calendar_id, "jobs", job_id), + params=params, + headers=headers, + ) + + @query_params("force") + def delete_datafeed(self, datafeed_id, params=None, headers=None): + """ + Deletes an existing datafeed. + ``_ + + :arg datafeed_id: The ID of the datafeed to delete + :arg force: True if the datafeed should be forcefully deleted + """ + if datafeed_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'datafeed_id'." + ) + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "datafeeds", datafeed_id), + params=params, + headers=headers, + ) + + @query_params() + def delete_expired_data(self, params=None, headers=None): + """ + Deletes expired and unused machine learning data. + ``_ + """ + return self.transport.perform_request( + "DELETE", "/_ml/_delete_expired_data", params=params, headers=headers + ) + + @query_params() + def delete_filter(self, filter_id, params=None, headers=None): + """ + Deletes a filter. + ``_ + + :arg filter_id: The ID of the filter to delete + """ + if filter_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'filter_id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "filters", filter_id), + params=params, + headers=headers, + ) + + @query_params("allow_no_forecasts", "timeout") + def delete_forecast(self, job_id, forecast_id=None, params=None, headers=None): + """ + Deletes forecasts from a machine learning job. + ``_ + + :arg job_id: The ID of the job from which to delete forecasts + :arg forecast_id: The ID of the forecast to delete, can be comma + delimited list. Leaving blank implies `_all` + :arg allow_no_forecasts: Whether to ignore if `_all` matches no + forecasts + :arg timeout: Controls the time to wait until the forecast(s) + are deleted. Default to 30 seconds + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "anomaly_detectors", job_id, "_forecast", forecast_id), + params=params, + headers=headers, + ) + + @query_params("force", "wait_for_completion") + def delete_job(self, job_id, params=None, headers=None): + """ + Deletes an existing anomaly detection job. 
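+ A minimal usage sketch (``es`` and the job ID are hypothetical)::
+
+     es.ml.delete_job("site-visitors", wait_for_completion=True)  # hypothetical ID
+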
+ ``_ + + :arg job_id: The ID of the job to delete + :arg force: True if the job should be forcefully deleted + :arg wait_for_completion: Should this request wait until the + operation has completed before returning Default: True + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "anomaly_detectors", job_id), + params=params, + headers=headers, + ) + + @query_params() + def delete_model_snapshot(self, job_id, snapshot_id, params=None, headers=None): + """ + Deletes an existing model snapshot. + ``_ + + :arg job_id: The ID of the job to fetch + :arg snapshot_id: The ID of the snapshot to delete + """ + for param in (job_id, snapshot_id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", + _make_path( + "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id + ), + params=params, + headers=headers, + ) + + @query_params( + "charset", + "column_names", + "delimiter", + "explain", + "format", + "grok_pattern", + "has_header_row", + "line_merge_size_limit", + "lines_to_sample", + "quote", + "should_trim_fields", + "timeout", + "timestamp_field", + "timestamp_format", + ) + def find_file_structure(self, body, params=None, headers=None): + """ + Finds the structure of a text file. The text file must contain data that is + suitable to be ingested into Elasticsearch. + ``_ + + :arg body: The contents of the file to be analyzed + :arg charset: Optional parameter to specify the character set of + the file + :arg column_names: Optional parameter containing a comma + separated list of the column names for a delimited file + :arg delimiter: Optional parameter to specify the delimiter + character for a delimited file - must be a single character + :arg explain: Whether to include a commentary on how the + structure was derived + :arg format: Optional parameter to specify the high level file + format Valid choices: ndjson, xml, delimited, semi_structured_text + :arg grok_pattern: Optional parameter to specify the Grok + pattern that should be used to extract fields from messages in a semi- + structured text file + :arg has_header_row: Optional parameter to specify whether a + delimited file includes the column names in its first row + :arg line_merge_size_limit: Maximum number of characters + permitted in a single message when lines are merged to create messages. 
+ Default: 10000 + :arg lines_to_sample: How many lines of the file should be + included in the analysis Default: 1000 + :arg quote: Optional parameter to specify the quote character + for a delimited file - must be a single character + :arg should_trim_fields: Optional parameter to specify whether + the values between delimiters in a delimited file should have whitespace + trimmed from them + :arg timeout: Timeout after which the analysis will be aborted + Default: 25s + :arg timestamp_field: Optional parameter to specify the + timestamp field in the file + :arg timestamp_format: Optional parameter to specify the + timestamp format in the file - may be either a Joda or Java time format + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + "/_ml/find_file_structure", + params=params, + headers=headers, + body=body, + ) + + @query_params("advance_time", "calc_interim", "end", "skip_time", "start") + def flush_job(self, job_id, body=None, params=None, headers=None): + """ + Forces any buffered data to be processed by the job. + ``_ + + :arg job_id: The name of the job to flush + :arg body: Flush parameters + :arg advance_time: Advances time to the given value generating + results and updating the model for the advanced interval + :arg calc_interim: Calculates interim results for the most + recent bucket or all buckets within the latency period + :arg end: When used in conjunction with calc_interim, specifies + the range of buckets on which to calculate interim results + :arg skip_time: Skips time to the given value without generating + results or updating the model for the skipped interval + :arg start: When used in conjunction with calc_interim, + specifies the range of buckets on which to calculate interim results + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "_flush"), + params=params, + headers=headers, + body=body, + ) + + @query_params("duration", "expires_in") + def forecast(self, job_id, params=None, headers=None): + """ + Predicts the future behavior of a time series by using its historical behavior. + ``_ + + :arg job_id: The ID of the job to forecast for + :arg duration: The duration of the forecast + :arg expires_in: The time interval after which the forecast + expires. Expired forecasts will be deleted at the first opportunity. + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "_forecast"), + params=params, + headers=headers, + ) + + @query_params( + "anomaly_score", + "desc", + "end", + "exclude_interim", + "expand", + "from_", + "size", + "sort", + "start", + ) + def get_buckets(self, job_id, body=None, timestamp=None, params=None, headers=None): + """ + Retrieves anomaly detection job results for one or more buckets. 
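+ A minimal usage sketch (``es`` and the job ID are hypothetical)::
+
+     es.ml.get_buckets("site-visitors", size=10, sort="anomaly_score", desc=True)
+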
+ ``_ + + :arg job_id: ID of the job to get bucket results from + :arg body: Bucket selection details if not provided in URI + :arg timestamp: The timestamp of the desired single bucket + result + :arg anomaly_score: Filter for the most anomalous buckets + :arg desc: Set the sort direction + :arg end: End time filter for buckets + :arg exclude_interim: Exclude interim results + :arg expand: Include anomaly records + :arg from_: skips a number of buckets + :arg size: specifies a max number of buckets to get + :arg sort: Sort buckets by a particular field + :arg start: Start time filter for buckets + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path( + "_ml", "anomaly_detectors", job_id, "results", "buckets", timestamp + ), + params=params, + headers=headers, + body=body, + ) + + @query_params("end", "from_", "job_id", "size", "start") + def get_calendar_events(self, calendar_id, params=None, headers=None): + """ + Retrieves information about the scheduled events in calendars. + ``_ + + :arg calendar_id: The ID of the calendar containing the events + :arg end: Get events before this time + :arg from_: Skips a number of events + :arg job_id: Get events for the job. When this option is used + calendar_id must be '_all' + :arg size: Specifies a max number of events to get + :arg start: Get events after this time + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if calendar_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'calendar_id'." + ) + + return self.transport.perform_request( + "GET", + _make_path("_ml", "calendars", calendar_id, "events"), + params=params, + headers=headers, + ) + + @query_params("from_", "size") + def get_calendars(self, body=None, calendar_id=None, params=None, headers=None): + """ + Retrieves configuration information for calendars. + ``_ + + :arg body: The from and size parameters optionally sent in the + body + :arg calendar_id: The ID of the calendar to fetch + :arg from_: skips a number of calendars + :arg size: specifies a max number of calendars to get + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "calendars", calendar_id), + params=params, + headers=headers, + body=body, + ) + + @query_params("from_", "size") + def get_categories( + self, job_id, body=None, category_id=None, params=None, headers=None + ): + """ + Retrieves anomaly detection job results for one or more categories. 
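+ A minimal usage sketch (``es`` and the job ID are hypothetical)::
+
+     es.ml.get_categories("app-logs", size=20)  # hypothetical ID
+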
+ ``_ + + :arg job_id: The name of the job + :arg body: Category selection details if not provided in URI + :arg category_id: The identifier of the category definition of + interest + :arg from_: skips a number of categories + :arg size: specifies a max number of categories to get + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path( + "_ml", "anomaly_detectors", job_id, "results", "categories", category_id + ), + params=params, + headers=headers, + body=body, + ) + + @query_params("allow_no_datafeeds") + def get_datafeed_stats(self, datafeed_id=None, params=None, headers=None): + """ + Retrieves usage information for datafeeds. + ``_ + + :arg datafeed_id: The ID of the datafeeds stats to fetch + :arg allow_no_datafeeds: Whether to ignore if a wildcard + expression matches no datafeeds. (This includes `_all` string or when no + datafeeds have been specified) + """ + return self.transport.perform_request( + "GET", + _make_path("_ml", "datafeeds", datafeed_id, "_stats"), + params=params, + headers=headers, + ) + + @query_params("allow_no_datafeeds") + def get_datafeeds(self, datafeed_id=None, params=None, headers=None): + """ + Retrieves configuration information for datafeeds. + ``_ + + :arg datafeed_id: The ID of the datafeeds to fetch + :arg allow_no_datafeeds: Whether to ignore if a wildcard + expression matches no datafeeds. (This includes `_all` string or when no + datafeeds have been specified) + """ + return self.transport.perform_request( + "GET", + _make_path("_ml", "datafeeds", datafeed_id), + params=params, + headers=headers, + ) + + @query_params("from_", "size") + def get_filters(self, filter_id=None, params=None, headers=None): + """ + Retrieves filters. + ``_ + + :arg filter_id: The ID of the filter to fetch + :arg from_: skips a number of filters + :arg size: specifies a max number of filters to get + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_ml", "filters", filter_id), + params=params, + headers=headers, + ) + + @query_params( + "desc", + "end", + "exclude_interim", + "from_", + "influencer_score", + "size", + "sort", + "start", + ) + def get_influencers(self, job_id, body=None, params=None, headers=None): + """ + Retrieves anomaly detection job results for one or more influencers. 
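+
+        A minimal usage sketch (the job ID is illustrative; assumes a
+        connected ``Elasticsearch`` client named ``es``)::
+
+            influencers = es.ml.get_influencers(
+                job_id="my-anomaly-job",
+                influencer_score=50,
+                sort="influencer_score",
+                desc=True,
+            )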
+        ``_
+
+        :arg job_id: Identifier for the anomaly detection job
+        :arg body: Influencer selection criteria
+        :arg desc: whether the results should be sorted in descending
+            order
+        :arg end: end timestamp for the requested influencers
+        :arg exclude_interim: Exclude interim results
+        :arg from_: skips a number of influencers
+        :arg influencer_score: influencer score threshold for the
+            requested influencers
+        :arg size: specifies a max number of influencers to get
+        :arg sort: sort field for the requested influencers
+        :arg start: start timestamp for the requested influencers
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "results", "influencers"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_jobs")
+    def get_job_stats(self, job_id=None, params=None, headers=None):
+        """
+        Retrieves usage information for anomaly detection jobs.
+        ``_
+
+        :arg job_id: The ID of the jobs stats to fetch
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "anomaly_detectors", job_id, "_stats"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_jobs")
+    def get_jobs(self, job_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for anomaly detection jobs.
+        ``_
+
+        :arg job_id: The ID of the jobs to fetch
+        :arg allow_no_jobs: Whether to ignore if a wildcard expression
+            matches no jobs. (This includes `_all` string or when no jobs have been
+            specified)
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "anomaly_detectors", job_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("desc", "end", "from_", "size", "sort", "start")
+    def get_model_snapshots(
+        self, job_id, body=None, snapshot_id=None, params=None, headers=None
+    ):
+        """
+        Retrieves information about model snapshots.
+        ``_
+
+        :arg job_id: The ID of the job to fetch
+        :arg body: Model snapshot selection criteria
+        :arg snapshot_id: The ID of the snapshot to fetch
+        :arg desc: True if the results should be sorted in descending
+            order
+        :arg end: The filter 'end' query parameter
+        :arg from_: Skips a number of documents
+        :arg size: The default number of documents returned in queries
+            as a string.
+        :arg sort: Name of the field to sort on
+        :arg start: The filter 'start' query parameter
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        if job_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "allow_no_jobs",
+        "bucket_span",
+        "end",
+        "exclude_interim",
+        "overall_score",
+        "start",
+        "top_n",
+    )
+    def get_overall_buckets(self, job_id, body=None, params=None, headers=None):
+        """
+        Retrieves overall bucket results that summarize the bucket results of multiple
+        anomaly detection jobs.
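+
+        A minimal usage sketch (the job IDs are illustrative; assumes a
+        connected ``Elasticsearch`` client named ``es``)::
+
+            overall = es.ml.get_overall_buckets(
+                job_id="job-1,job-2",
+                top_n=2,
+                overall_score=60,
+            )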
+ ``_ + + :arg job_id: The job IDs for which to calculate overall bucket + results + :arg body: Overall bucket selection details if not provided in + URI + :arg allow_no_jobs: Whether to ignore if a wildcard expression + matches no jobs. (This includes `_all` string or when no jobs have been + specified) + :arg bucket_span: The span of the overall buckets. Defaults to + the longest job bucket_span + :arg end: Returns overall buckets with timestamps earlier than + this time + :arg exclude_interim: If true overall buckets that include + interim buckets will be excluded + :arg overall_score: Returns overall buckets with overall scores + higher than this value + :arg start: Returns overall buckets with timestamps after this + time + :arg top_n: The number of top job bucket scores to be used in + the overall_score calculation + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path( + "_ml", "anomaly_detectors", job_id, "results", "overall_buckets" + ), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "desc", + "end", + "exclude_interim", + "from_", + "record_score", + "size", + "sort", + "start", + ) + def get_records(self, job_id, body=None, params=None, headers=None): + """ + Retrieves anomaly records for an anomaly detection job. + ``_ + + :arg job_id: The ID of the job + :arg body: Record selection criteria + :arg desc: Set the sort direction + :arg end: End time filter for records + :arg exclude_interim: Exclude interim results + :arg from_: skips a number of records + :arg record_score: Returns records with anomaly scores greater + or equal than this value + :arg size: specifies a max number of records to get + :arg sort: Sort records by a particular field + :arg start: Start time filter for records + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "results", "records"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def info(self, params=None, headers=None): + """ + Returns defaults and limits used by machine learning. + ``_ + """ + return self.transport.perform_request( + "GET", "/_ml/info", params=params, headers=headers + ) + + @query_params() + def open_job(self, job_id, params=None, headers=None): + """ + Opens one or more anomaly detection jobs. + ``_ + + :arg job_id: The ID of the job to open + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "_open"), + params=params, + headers=headers, + ) + + @query_params() + def post_calendar_events(self, calendar_id, body, params=None, headers=None): + """ + Posts scheduled events in a calendar. 
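+
+        A minimal usage sketch (calendar ID and event are illustrative;
+        assumes a connected ``Elasticsearch`` client named ``es``; the
+        event times are epoch milliseconds)::
+
+            es.ml.post_calendar_events(
+                calendar_id="holidays",
+                body={
+                    "events": [
+                        {
+                            "description": "New Year",
+                            "start_time": 1577836800000,
+                            "end_time": 1577923200000,
+                        }
+                    ]
+                },
+            )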
+ ``_ + + :arg calendar_id: The ID of the calendar to modify + :arg body: A list of events + """ + for param in (calendar_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "calendars", calendar_id, "events"), + params=params, + headers=headers, + body=body, + ) + + @query_params("reset_end", "reset_start") + def post_data(self, job_id, body, params=None, headers=None): + """ + Sends data to an anomaly detection job for analysis. + ``_ + + :arg job_id: The name of the job receiving the data + :arg body: The data to process + :arg reset_end: Optional parameter to specify the end of the + bucket resetting range + :arg reset_start: Optional parameter to specify the start of the + bucket resetting range + """ + for param in (job_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "_data"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def preview_datafeed(self, datafeed_id, params=None, headers=None): + """ + Previews a datafeed. + ``_ + + :arg datafeed_id: The ID of the datafeed to preview + """ + if datafeed_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'datafeed_id'." + ) + + return self.transport.perform_request( + "GET", + _make_path("_ml", "datafeeds", datafeed_id, "_preview"), + params=params, + headers=headers, + ) + + @query_params() + def put_calendar(self, calendar_id, body=None, params=None, headers=None): + """ + Instantiates a calendar. + ``_ + + :arg calendar_id: The ID of the calendar to create + :arg body: The calendar details + """ + if calendar_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'calendar_id'." + ) + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "calendars", calendar_id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def put_calendar_job(self, calendar_id, job_id, params=None, headers=None): + """ + Adds an anomaly detection job to a calendar. + ``_ + + :arg calendar_id: The ID of the calendar to modify + :arg job_id: The ID of the job to add to the calendar + """ + for param in (calendar_id, job_id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "calendars", calendar_id, "jobs", job_id), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_indices", "expand_wildcards", "ignore_throttled", "ignore_unavailable" + ) + def put_datafeed(self, datafeed_id, body, params=None, headers=None): + """ + Instantiates a datafeed. 
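+
+        A minimal usage sketch (IDs and index names are illustrative;
+        assumes a connected ``Elasticsearch`` client named ``es`` and an
+        existing anomaly detection job)::
+
+            es.ml.put_datafeed(
+                datafeed_id="datafeed-my-anomaly-job",
+                body={"job_id": "my-anomaly-job", "indices": ["my-metrics-*"]},
+            )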
+ ``_ + + :arg datafeed_id: The ID of the datafeed to create + :arg body: The datafeed config + :arg allow_no_indices: Ignore if the source indices expressions + resolves to no concrete indices (default: true) + :arg expand_wildcards: Whether source index expressions should + get expanded to open or closed indices (default: open) Valid choices: + open, closed, hidden, none, all + :arg ignore_throttled: Ignore indices that are marked as + throttled (default: true) + :arg ignore_unavailable: Ignore unavailable indexes (default: + false) + """ + for param in (datafeed_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "datafeeds", datafeed_id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def put_filter(self, filter_id, body, params=None, headers=None): + """ + Instantiates a filter. + ``_ + + :arg filter_id: The ID of the filter to create + :arg body: The filter details + """ + for param in (filter_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "filters", filter_id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def put_job(self, job_id, body, params=None, headers=None): + """ + Instantiates an anomaly detection job. + ``_ + + :arg job_id: The ID of the job to create + :arg body: The job + """ + for param in (job_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "anomaly_detectors", job_id), + params=params, + headers=headers, + body=body, + ) + + @query_params("delete_intervening_results") + def revert_model_snapshot( + self, job_id, snapshot_id, body=None, params=None, headers=None + ): + """ + Reverts to a specific snapshot. + ``_ + + :arg job_id: The ID of the job to fetch + :arg snapshot_id: The ID of the snapshot to revert to + :arg body: Reversion options + :arg delete_intervening_results: Should we reset the results + back to the time of the snapshot? + """ + for param in (job_id, snapshot_id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path( + "_ml", + "anomaly_detectors", + job_id, + "model_snapshots", + snapshot_id, + "_revert", + ), + params=params, + headers=headers, + body=body, + ) + + @query_params("enabled", "timeout") + def set_upgrade_mode(self, params=None, headers=None): + """ + Sets a cluster wide upgrade_mode setting that prepares machine learning indices + for an upgrade. + ``_ + + :arg enabled: Whether to enable upgrade_mode ML setting or not. + Defaults to false. + :arg timeout: Controls the time to wait before action times out. + Defaults to 30 seconds + """ + return self.transport.perform_request( + "POST", "/_ml/set_upgrade_mode", params=params, headers=headers + ) + + @query_params("end", "start", "timeout") + def start_datafeed(self, datafeed_id, body=None, params=None, headers=None): + """ + Starts one or more datafeeds. + ``_ + + :arg datafeed_id: The ID of the datafeed to start + :arg body: The start datafeed parameters + :arg end: The end time when the datafeed should stop. 
When not
+            set, the datafeed continues in real time
+        :arg start: The start time from where the datafeed should begin
+        :arg timeout: Controls the time to wait until a datafeed has
+            started. Defaults to 20 seconds
+        """
+        if datafeed_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'datafeed_id'."
+            )
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "datafeeds", datafeed_id, "_start"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_datafeeds", "force", "timeout")
+    def stop_datafeed(self, datafeed_id, params=None, headers=None):
+        """
+        Stops one or more datafeeds.
+        ``_
+
+        :arg datafeed_id: The ID of the datafeed to stop
+        :arg allow_no_datafeeds: Whether to ignore if a wildcard
+            expression matches no datafeeds. (This includes `_all` string or when no
+            datafeeds have been specified)
+        :arg force: True if the datafeed should be forcefully stopped.
+        :arg timeout: Controls the time to wait until a datafeed has
+            stopped. Defaults to 20 seconds
+        """
+        if datafeed_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'datafeed_id'."
+            )
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "datafeeds", datafeed_id, "_stop"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_throttled", "ignore_unavailable"
+    )
+    def update_datafeed(self, datafeed_id, body, params=None, headers=None):
+        """
+        Updates certain properties of a datafeed.
+        ``_
+
+        :arg datafeed_id: The ID of the datafeed to update
+        :arg body: The datafeed update settings
+        :arg allow_no_indices: Ignore if the source indices expressions
+            resolves to no concrete indices (default: true)
+        :arg expand_wildcards: Whether source index expressions should
+            get expanded to open or closed indices (default: open) Valid choices:
+            open, closed, hidden, none, all
+        :arg ignore_throttled: Ignore indices that are marked as
+            throttled (default: true)
+        :arg ignore_unavailable: Ignore unavailable indexes (default:
+            false)
+        """
+        for param in (datafeed_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "datafeeds", datafeed_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def update_filter(self, filter_id, body, params=None, headers=None):
+        """
+        Updates the description of a filter, adds items, or removes items.
+        ``_
+
+        :arg filter_id: The ID of the filter to update
+        :arg body: The filter update
+        """
+        for param in (filter_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "filters", filter_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def update_job(self, job_id, body, params=None, headers=None):
+        """
+        Updates certain properties of an anomaly detection job.
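+
+        A minimal usage sketch (job ID and settings are illustrative;
+        assumes a connected ``Elasticsearch`` client named ``es``)::
+
+            es.ml.update_job(
+                job_id="my-anomaly-job",
+                body={"description": "Re-tuned anomaly detection job"},
+            )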
+        ``_
+
+        :arg job_id: The ID of the job to update
+        :arg body: The job update settings
+        """
+        for param in (job_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "anomaly_detectors", job_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def update_model_snapshot(
+        self, job_id, snapshot_id, body, params=None, headers=None
+    ):
+        """
+        Updates certain properties of a snapshot.
+        ``_
+
+        :arg job_id: The ID of the job to fetch
+        :arg snapshot_id: The ID of the snapshot to update
+        :arg body: The model snapshot properties to update
+        """
+        for param in (job_id, snapshot_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(
+                "_ml",
+                "anomaly_detectors",
+                job_id,
+                "model_snapshots",
+                snapshot_id,
+                "_update",
+            ),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def validate(self, body, params=None, headers=None):
+        """
+        Validates an anomaly detection job.
+        ``_
+
+        :arg body: The job config
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            "/_ml/anomaly_detectors/_validate",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def validate_detector(self, body, params=None, headers=None):
+        """
+        Validates an anomaly detection detector.
+        ``_
+
+        :arg body: The detector
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            "/_ml/anomaly_detectors/_validate/detector",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("force")
+    def delete_data_frame_analytics(self, id, params=None, headers=None):
+        """
+        Deletes an existing data frame analytics job.
+        ``_
+
+        :arg id: The ID of the data frame analytics to delete
+        :arg force: True if the job should be forcefully deleted
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "data_frame", "analytics", id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def evaluate_data_frame(self, body, params=None, headers=None):
+        """
+        Evaluates the data frame analytics for an annotated index.
+        ``_
+
+        :arg body: The evaluation definition
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "POST",
+            "/_ml/data_frame/_evaluate",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    def get_data_frame_analytics(self, id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for data frame analytics jobs.
+        ``_
+
+        :arg id: The ID of the data frame analytics to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no data frame analytics.
(This includes `_all` string or when no + data frame analytics have been specified) Default: True + :arg from_: skips a number of analytics + :arg size: specifies a max number of analytics to get Default: + 100 + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_ml", "data_frame", "analytics", id), + params=params, + headers=headers, + ) + + @query_params("allow_no_match", "from_", "size") + def get_data_frame_analytics_stats(self, id=None, params=None, headers=None): + """ + Retrieves usage information for data frame analytics jobs. + ``_ + + :arg id: The ID of the data frame analytics stats to fetch + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no data frame analytics. (This includes `_all` string or when no + data frame analytics have been specified) Default: True + :arg from_: skips a number of analytics + :arg size: specifies a max number of analytics to get Default: + 100 + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_ml", "data_frame", "analytics", id, "_stats"), + params=params, + headers=headers, + ) + + @query_params() + def put_data_frame_analytics(self, id, body, params=None, headers=None): + """ + Instantiates a data frame analytics job. + ``_ + + :arg id: The ID of the data frame analytics to create + :arg body: The data frame analytics configuration + """ + for param in (id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "data_frame", "analytics", id), + params=params, + headers=headers, + body=body, + ) + + @query_params("timeout") + def start_data_frame_analytics(self, id, body=None, params=None, headers=None): + """ + Starts a data frame analytics job. + ``_ + + :arg id: The ID of the data frame analytics to start + :arg body: The start data frame analytics parameters + :arg timeout: Controls the time to wait until the task has + started. Defaults to 20 seconds + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "data_frame", "analytics", id, "_start"), + params=params, + headers=headers, + body=body, + ) + + @query_params("allow_no_match", "force", "timeout") + def stop_data_frame_analytics(self, id, body=None, params=None, headers=None): + """ + Stops one or more data frame analytics jobs. + ``_ + + :arg id: The ID of the data frame analytics to stop + :arg body: The stop data frame analytics parameters + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no data frame analytics. (This includes `_all` string or when no + data frame analytics have been specified) + :arg force: True if the data frame analytics should be + forcefully stopped + :arg timeout: Controls the time to wait until the task has + stopped. 
Defaults to 20 seconds
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "data_frame", "analytics", id, "_stop"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def delete_trained_model(self, model_id, params=None, headers=None):
+        """
+        Deletes an existing trained inference model that is currently not referenced by
+        an ingest pipeline.
+        ``_
+
+        :arg model_id: The ID of the trained model to delete
+        """
+        if model_id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'model_id'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_ml", "inference", model_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def explain_data_frame_analytics(
+        self, body=None, id=None, params=None, headers=None
+    ):
+        """
+        Explains a data frame analytics config.
+        ``_
+
+        :arg body: The data frame analytics config to explain
+        :arg id: The ID of the data frame analytics to explain
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ml", "data_frame", "analytics", id, "_explain"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "allow_no_match",
+        "decompress_definition",
+        "from_",
+        "include_model_definition",
+        "size",
+        "tags",
+    )
+    def get_trained_models(self, model_id=None, params=None, headers=None):
+        """
+        Retrieves configuration information for a trained inference model.
+        ``_
+
+        :arg model_id: The ID of the trained models to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no trained models. (This includes `_all` string or when no
+            trained models have been specified) Default: True
+        :arg decompress_definition: Should the model definition be
+            decompressed into valid JSON or returned in a custom compressed format.
+            Default: True
+        :arg from_: skips a number of trained models
+        :arg include_model_definition: Should the full model definition
+            be included in the results. These definitions can be large, so be
+            cautious when including them. Defaults to false.
+        :arg size: specifies a max number of trained models to get
+            Default: 100
+        :arg tags: A comma-separated list of tags that the model must
+            have.
+        """
+        # from is a reserved word so it cannot be used, use from_ instead
+        if "from_" in params:
+            params["from"] = params.pop("from_")
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_ml", "inference", model_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("allow_no_match", "from_", "size")
+    def get_trained_models_stats(self, model_id=None, params=None, headers=None):
+        """
+        Retrieves usage information for trained inference models.
+        ``_
+
+        :arg model_id: The ID of the trained models stats to fetch
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no trained models.
(This includes `_all` string or when no + trained models have been specified) Default: True + :arg from_: skips a number of trained models + :arg size: specifies a max number of trained models to get + Default: 100 + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_ml", "inference", model_id, "_stats"), + params=params, + headers=headers, + ) + + @query_params() + def put_trained_model(self, model_id, body, params=None, headers=None): + """ + Creates an inference trained model. + ``_ + + :arg model_id: The ID of the trained models to store + :arg body: The trained model configuration + """ + for param in (model_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "inference", model_id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def estimate_model_memory(self, body, params=None, headers=None): + """ + Estimates the model memory + ``_ + + :arg body: The analysis config, plus cardinality estimates for + fields it references + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + "/_ml/anomaly_detectors/_estimate_model_memory", + params=params, + headers=headers, + body=body, + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/monitoring.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/monitoring.py new file mode 100755 index 000000000..48ca5d50e --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/monitoring.py @@ -0,0 +1,30 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body + + +class MonitoringClient(NamespacedClient): + @query_params("interval", "system_api_version", "system_id") + def bulk(self, body, doc_type=None, params=None, headers=None): + """ + Used by the monitoring features to send monitoring data. + ``_ + + :arg body: The operation definition and data (action-data + pairs), separated by newlines + :arg doc_type: Default document type for items which don't + provide one + :arg interval: Collection interval (e.g., '10s' or '10000ms') of + the payload + :arg system_api_version: API Version of the monitored system + :arg system_id: Identifier of the monitored system + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + _make_path("_monitoring", doc_type, "bulk"), + params=params, + headers=headers, + body=body, + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/nodes.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/nodes.py new file mode 100755 index 000000000..175f705c2 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/nodes.py @@ -0,0 +1,156 @@ +from .utils import NamespacedClient, query_params, _make_path + + +class NodesClient(NamespacedClient): + @query_params("timeout") + def reload_secure_settings( + self, body=None, node_id=None, params=None, headers=None + ): + """ + Reloads secure settings. + ``_ + + :arg body: An object containing the password for the + elasticsearch keystore + :arg node_id: A comma-separated list of node IDs to span the + reload/reinit call. 
Should stay empty because reloading usually involves + all cluster nodes. + :arg timeout: Explicit operation timeout + """ + return self.transport.perform_request( + "POST", + _make_path("_nodes", node_id, "reload_secure_settings"), + params=params, + headers=headers, + body=body, + ) + + @query_params("flat_settings", "timeout") + def info(self, node_id=None, metric=None, params=None, headers=None): + """ + Returns information about nodes in the cluster. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information; use `_local` to return information from + the node you're connecting to, leave empty to get information from all + nodes + :arg metric: A comma-separated list of metrics you wish + returned. Leave empty to return all. Valid choices: settings, os, + process, jvm, thread_pool, transport, http, plugins, ingest + :arg flat_settings: Return settings in flat format (default: + false) + :arg timeout: Explicit operation timeout + """ + return self.transport.perform_request( + "GET", _make_path("_nodes", node_id, metric), params=params, headers=headers + ) + + @query_params( + "completion_fields", + "fielddata_fields", + "fields", + "groups", + "include_segment_file_sizes", + "level", + "timeout", + "types", + ) + def stats( + self, node_id=None, metric=None, index_metric=None, params=None, headers=None + ): + """ + Returns statistical information about nodes in the cluster. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information; use `_local` to return information from + the node you're connecting to, leave empty to get information from all + nodes + :arg metric: Limit the information returned to the specified + metrics Valid choices: _all, breaker, fs, http, indices, jvm, os, + process, thread_pool, transport, discovery + :arg index_metric: Limit the information returned for `indices` + metric to the specific index metrics. Isn't used if `indices` (or `all`) + metric isn't specified. Valid choices: _all, completion, docs, + fielddata, query_cache, flush, get, indexing, merge, request_cache, + refresh, search, segments, store, warmer, suggest + :arg completion_fields: A comma-separated list of fields for + `fielddata` and `suggest` index metric (supports wildcards) + :arg fielddata_fields: A comma-separated list of fields for + `fielddata` index metric (supports wildcards) + :arg fields: A comma-separated list of fields for `fielddata` + and `completion` index metric (supports wildcards) + :arg groups: A comma-separated list of search groups for + `search` index metric + :arg include_segment_file_sizes: Whether to report the + aggregated disk usage of each one of the Lucene index files (only + applies if segment stats are requested) + :arg level: Return indices stats aggregated at index, node or + shard level Valid choices: indices, node, shards Default: node + :arg timeout: Explicit operation timeout + :arg types: A comma-separated list of document types for the + `indexing` index metric + """ + return self.transport.perform_request( + "GET", + _make_path("_nodes", node_id, "stats", metric, index_metric), + params=params, + headers=headers, + ) + + @query_params( + "doc_type", "ignore_idle_threads", "interval", "snapshots", "threads", "timeout" + ) + def hot_threads(self, node_id=None, params=None, headers=None): + """ + Returns information about hot threads on each node in the cluster. 
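+
+        A minimal usage sketch (assumes a connected ``Elasticsearch``
+        client named ``es``; note that ``doc_type`` is remapped to the
+        ``type`` query parameter below)::
+
+            report = es.nodes.hot_threads(
+                node_id="_local",
+                threads=5,
+                doc_type="cpu",
+            )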
+ ``_ + + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information; use `_local` to return information from + the node you're connecting to, leave empty to get information from all + nodes + :arg doc_type: The type to sample (default: cpu) Valid choices: + cpu, wait, block + :arg ignore_idle_threads: Don't show threads that are in known- + idle places, such as waiting on a socket select or pulling from an empty + task queue (default: true) + :arg interval: The interval for the second sampling of threads + :arg snapshots: Number of samples of thread stacktrace (default: + 10) + :arg threads: Specify the number of threads to provide + information for (default: 3) + :arg timeout: Explicit operation timeout + """ + # type is a reserved word so it cannot be used, use doc_type instead + if "doc_type" in params: + params["type"] = params.pop("doc_type") + + return self.transport.perform_request( + "GET", + _make_path("_nodes", node_id, "hot_threads"), + params=params, + headers=headers, + ) + + @query_params("timeout") + def usage(self, node_id=None, metric=None, params=None, headers=None): + """ + Returns low-level information about REST actions usage on nodes. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information; use `_local` to return information from + the node you're connecting to, leave empty to get information from all + nodes + :arg metric: Limit the information returned to the specified + metrics Valid choices: _all, rest_actions + :arg timeout: Explicit operation timeout + """ + return self.transport.perform_request( + "GET", + _make_path("_nodes", node_id, "usage", metric), + params=params, + headers=headers, + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/remote.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/remote.py new file mode 100755 index 000000000..8590313a7 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/remote.py @@ -0,0 +1,12 @@ +from .utils import NamespacedClient, query_params + + +class RemoteClient(NamespacedClient): + @query_params() + def info(self, params=None, headers=None): + """ + ``_ + """ + return self.transport.perform_request( + "GET", "/_remote/info", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/rollup.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/rollup.py new file mode 100755 index 000000000..afcd93ae1 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/rollup.py @@ -0,0 +1,151 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class RollupClient(NamespacedClient): + @query_params() + def delete_job(self, id, params=None, headers=None): + """ + Deletes an existing rollup job. + ``_ + + :arg id: The ID of the job to delete + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "DELETE", _make_path("_rollup", "job", id), params=params, headers=headers + ) + + @query_params() + def get_jobs(self, id=None, params=None, headers=None): + """ + Retrieves the configuration, stats, and status of rollup jobs. + ``_ + + :arg id: The ID of the job(s) to fetch. 
Accepts glob patterns, + or left blank for all jobs + """ + return self.transport.perform_request( + "GET", _make_path("_rollup", "job", id), params=params, headers=headers + ) + + @query_params() + def get_rollup_caps(self, id=None, params=None, headers=None): + """ + Returns the capabilities of any rollup jobs that have been configured for a + specific index or index pattern. + ``_ + + :arg id: The ID of the index to check rollup capabilities on, or + left blank for all jobs + """ + return self.transport.perform_request( + "GET", _make_path("_rollup", "data", id), params=params, headers=headers + ) + + @query_params() + def get_rollup_index_caps(self, index, params=None, headers=None): + """ + Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the + index where rollup data is stored). + ``_ + + :arg index: The rollup index or index pattern to obtain rollup + capabilities from. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "GET", _make_path(index, "_rollup", "data"), params=params, headers=headers + ) + + @query_params() + def put_job(self, id, body, params=None, headers=None): + """ + Creates a rollup job. + ``_ + + :arg id: The ID of the job to create + :arg body: The job configuration + """ + for param in (id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_rollup", "job", id), + params=params, + headers=headers, + body=body, + ) + + @query_params("rest_total_hits_as_int", "typed_keys") + def rollup_search(self, index, body, doc_type=None, params=None, headers=None): + """ + Enables searching rolled-up data using the standard query DSL. + ``_ + + :arg index: The indices or index-pattern(s) (containing rollup + or regular data) that should be searched + :arg body: The search request body + :arg doc_type: The doc type inside the index + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + """ + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_rollup_search"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def start_job(self, id, params=None, headers=None): + """ + Starts an existing, stopped rollup job. + ``_ + + :arg id: The ID of the job to start + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "POST", + _make_path("_rollup", "job", id, "_start"), + params=params, + headers=headers, + ) + + @query_params("timeout", "wait_for_completion") + def stop_job(self, id, params=None, headers=None): + """ + Stops an existing, started rollup job. + ``_ + + :arg id: The ID of the job to stop + :arg timeout: Block for (at maximum) the specified duration + while waiting for the job to stop. Defaults to 30s. + :arg wait_for_completion: True if the API should block until the + job has fully stopped, false if should be executed async. Defaults to + false. 
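+
+        A minimal usage sketch (the job ID is illustrative; assumes a
+        connected ``Elasticsearch`` client named ``es``)::
+
+            es.rollup.stop_job(
+                id="sensor-rollup",
+                wait_for_completion=True,
+                timeout="1m",
+            )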
+ """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "POST", + _make_path("_rollup", "job", id, "_stop"), + params=params, + headers=headers, + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/security.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/security.py new file mode 100755 index 000000000..9f54fc330 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/security.py @@ -0,0 +1,493 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class SecurityClient(NamespacedClient): + @query_params() + def authenticate(self, params=None, headers=None): + """ + Enables authentication as a user and retrieve information about the + authenticated user. + ``_ + """ + return self.transport.perform_request( + "GET", "/_security/_authenticate", params=params, headers=headers + ) + + @query_params("refresh") + def change_password(self, body, username=None, params=None, headers=None): + """ + Changes the passwords of users in the native realm and built-in users. + ``_ + + :arg body: the new password for the user + :arg username: The username of the user to change the password + for + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "user", username, "_password"), + params=params, + headers=headers, + body=body, + ) + + @query_params("usernames") + def clear_cached_realms(self, realms, params=None, headers=None): + """ + Evicts users from the user cache. Can completely clear the cache or evict + specific users. + ``_ + + :arg realms: Comma-separated list of realms to clear + :arg usernames: Comma-separated list of usernames to clear from + the cache + """ + if realms in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'realms'.") + + return self.transport.perform_request( + "POST", + _make_path("_security", "realm", realms, "_clear_cache"), + params=params, + headers=headers, + ) + + @query_params() + def clear_cached_roles(self, name, params=None, headers=None): + """ + Evicts roles from the native role cache. + ``_ + + :arg name: Role name + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "POST", + _make_path("_security", "role", name, "_clear_cache"), + params=params, + headers=headers, + ) + + @query_params("refresh") + def create_api_key(self, body, params=None, headers=None): + """ + Creates an API key for access without requiring basic authentication. + ``_ + + :arg body: The api key request to create an API key + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. 
Valid choices: true, false, wait_for + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "PUT", "/_security/api_key", params=params, headers=headers, body=body + ) + + @query_params("refresh") + def delete_privileges(self, application, name, params=None, headers=None): + """ + Removes application privileges. + ``_ + + :arg application: Application name + :arg name: Privilege name + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + for param in (application, name): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", + _make_path("_security", "privilege", application, name), + params=params, + headers=headers, + ) + + @query_params("refresh") + def delete_role(self, name, params=None, headers=None): + """ + Removes roles in the native realm. + ``_ + + :arg name: Role name + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_security", "role", name), + params=params, + headers=headers, + ) + + @query_params("refresh") + def delete_role_mapping(self, name, params=None, headers=None): + """ + Removes role mappings. + ``_ + + :arg name: Role-mapping name + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_security", "role_mapping", name), + params=params, + headers=headers, + ) + + @query_params("refresh") + def delete_user(self, username, params=None, headers=None): + """ + Deletes users from the native realm. + ``_ + + :arg username: username + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if username in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'username'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_security", "user", username), + params=params, + headers=headers, + ) + + @query_params("refresh") + def disable_user(self, username, params=None, headers=None): + """ + Disables users in the native realm. 
+ ``_ + + :arg username: The username of the user to disable + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if username in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'username'.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "user", username, "_disable"), + params=params, + headers=headers, + ) + + @query_params("refresh") + def enable_user(self, username, params=None, headers=None): + """ + Enables users in the native realm. + ``_ + + :arg username: The username of the user to enable + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if username in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'username'.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "user", username, "_enable"), + params=params, + headers=headers, + ) + + @query_params("id", "name", "owner", "realm_name", "username") + def get_api_key(self, params=None, headers=None): + """ + Retrieves information for one or more API keys. + ``_ + + :arg id: API key id of the API key to be retrieved + :arg name: API key name of the API key to be retrieved + :arg owner: flag to query API keys owned by the currently + authenticated user + :arg realm_name: realm name of the user who created this API key + to be retrieved + :arg username: user name of the user who created this API key to + be retrieved + """ + return self.transport.perform_request( + "GET", "/_security/api_key", params=params, headers=headers + ) + + @query_params() + def get_privileges(self, application=None, name=None, params=None, headers=None): + """ + Retrieves application privileges. + ``_ + + :arg application: Application name + :arg name: Privilege name + """ + return self.transport.perform_request( + "GET", + _make_path("_security", "privilege", application, name), + params=params, + headers=headers, + ) + + @query_params() + def get_role(self, name=None, params=None, headers=None): + """ + Retrieves roles in the native realm. + ``_ + + :arg name: Role name + """ + return self.transport.perform_request( + "GET", _make_path("_security", "role", name), params=params, headers=headers + ) + + @query_params() + def get_role_mapping(self, name=None, params=None, headers=None): + """ + Retrieves role mappings. + ``_ + + :arg name: Role-Mapping name + """ + return self.transport.perform_request( + "GET", + _make_path("_security", "role_mapping", name), + params=params, + headers=headers, + ) + + @query_params() + def get_token(self, body, params=None, headers=None): + """ + Creates a bearer token for access without requiring basic authentication. 
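+
+        A minimal usage sketch (credentials are illustrative placeholders;
+        assumes a connected ``Elasticsearch`` client named ``es``)::
+
+            token = es.security.get_token(
+                body={
+                    "grant_type": "password",
+                    "username": "elastic",
+                    "password": "changeme",  # illustrative only
+                }
+            )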
+ ``_ + + :arg body: The token request to get + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_security/oauth2/token", params=params, headers=headers, body=body + ) + + @query_params() + def get_user(self, username=None, params=None, headers=None): + """ + Retrieves information about users in the native realm and built-in users. + ``_ + + :arg username: A comma-separated list of usernames + """ + return self.transport.perform_request( + "GET", + _make_path("_security", "user", username), + params=params, + headers=headers, + ) + + @query_params() + def get_user_privileges(self, params=None, headers=None): + """ + Retrieves application privileges. + ``_ + """ + return self.transport.perform_request( + "GET", "/_security/user/_privileges", params=params, headers=headers + ) + + @query_params() + def has_privileges(self, body, user=None, params=None, headers=None): + """ + Determines whether the specified user has a specified list of privileges. + ``_ + + :arg body: The privileges to test + :arg user: Username + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + _make_path("_security", "user", user, "_has_privileges"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def invalidate_api_key(self, body, params=None, headers=None): + """ + Invalidates one or more API keys. + ``_ + + :arg body: The api key request to invalidate API key(s) + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "DELETE", "/_security/api_key", params=params, headers=headers, body=body + ) + + @query_params() + def invalidate_token(self, body, params=None, headers=None): + """ + Invalidates one or more access tokens or refresh tokens. + ``_ + + :arg body: The token to invalidate + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "DELETE", + "/_security/oauth2/token", + params=params, + headers=headers, + body=body, + ) + + @query_params("refresh") + def put_privileges(self, body, params=None, headers=None): + """ + Adds or updates application privileges. + ``_ + + :arg body: The privilege(s) to add + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "PUT", "/_security/privilege/", params=params, headers=headers, body=body + ) + + @query_params("refresh") + def put_role(self, name, body, params=None, headers=None): + """ + Adds and updates roles in the native realm. + ``_ + + :arg name: Role name + :arg body: The role to add + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. 
Valid choices: true, false, wait_for + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "role", name), + params=params, + headers=headers, + body=body, + ) + + @query_params("refresh") + def put_role_mapping(self, name, body, params=None, headers=None): + """ + Creates and updates role mappings. + ``_ + + :arg name: Role-mapping name + :arg body: The role mapping to add + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "role_mapping", name), + params=params, + headers=headers, + body=body, + ) + + @query_params("refresh") + def put_user(self, username, body, params=None, headers=None): + """ + Adds and updates users in the native realm. These users are commonly referred + to as native users. + ``_ + + :arg username: The username of the User + :arg body: The user to add + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + for param in (username, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "user", username), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def get_builtin_privileges(self, params=None, headers=None): + """ + Retrieves the list of cluster privileges and index privileges that are + available in this version of Elasticsearch. + ``_ + """ + return self.transport.perform_request( + "GET", "/_security/privilege/_builtin", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/slm.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/slm.py new file mode 100755 index 000000000..576928f39 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/slm.py @@ -0,0 +1,131 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class SlmClient(NamespacedClient): + @query_params() + def delete_lifecycle(self, policy_id, params=None, headers=None): + """ + Deletes an existing snapshot lifecycle policy. + ``_ + + :arg policy_id: The id of the snapshot lifecycle policy to + remove + """ + if policy_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'policy_id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_slm", "policy", policy_id), + params=params, + headers=headers, + ) + + @query_params() + def execute_lifecycle(self, policy_id, params=None, headers=None): + """ + Immediately creates a snapshot according to the lifecycle policy, without + waiting for the scheduled time. 
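+
+        A minimal usage sketch (the policy ID is illustrative; assumes a
+        connected ``Elasticsearch`` client named ``es``)::
+
+            result = es.slm.execute_lifecycle(policy_id="nightly-snapshots")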
+ ``_ + + :arg policy_id: The id of the snapshot lifecycle policy to be + executed + """ + if policy_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'policy_id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_slm", "policy", policy_id, "_execute"), + params=params, + headers=headers, + ) + + @query_params() + def execute_retention(self, params=None, headers=None): + """ + Deletes any snapshots that are expired according to the policy's retention + rules. + ``_ + """ + return self.transport.perform_request( + "POST", "/_slm/_execute_retention", params=params, headers=headers + ) + + @query_params() + def get_lifecycle(self, policy_id=None, params=None, headers=None): + """ + Retrieves one or more snapshot lifecycle policy definitions and information + about the latest snapshot attempts. + ``_ + + :arg policy_id: Comma-separated list of snapshot lifecycle + policies to retrieve + """ + return self.transport.perform_request( + "GET", + _make_path("_slm", "policy", policy_id), + params=params, + headers=headers, + ) + + @query_params() + def get_stats(self, params=None, headers=None): + """ + Returns global and policy-level statistics about actions taken by snapshot + lifecycle management. + ``_ + """ + return self.transport.perform_request( + "GET", "/_slm/stats", params=params, headers=headers + ) + + @query_params() + def put_lifecycle(self, policy_id, body=None, params=None, headers=None): + """ + Creates or updates a snapshot lifecycle policy. + ``_ + + :arg policy_id: The id of the snapshot lifecycle policy + :arg body: The snapshot lifecycle policy definition to register + """ + if policy_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'policy_id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_slm", "policy", policy_id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def get_status(self, params=None, headers=None): + """ + Retrieves the status of snapshot lifecycle management (SLM). + ``_ + """ + return self.transport.perform_request( + "GET", "/_slm/status", params=params, headers=headers + ) + + @query_params() + def start(self, params=None, headers=None): + """ + Turns on snapshot lifecycle management (SLM). + ``_ + """ + return self.transport.perform_request( + "POST", "/_slm/start", params=params, headers=headers + ) + + @query_params() + def stop(self, params=None, headers=None): + """ + Turns off snapshot lifecycle management (SLM). + ``_ + """ + return self.transport.perform_request( + "POST", "/_slm/stop", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/snapshot.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/snapshot.py new file mode 100755 index 000000000..deda62f80 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/snapshot.py @@ -0,0 +1,229 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class SnapshotClient(NamespacedClient): + @query_params("master_timeout", "wait_for_completion") + def create(self, repository, snapshot, body=None, params=None, headers=None): + """ + Creates a snapshot in a repository. 
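End to end, the SLM methods above compose into a register/run/inspect loop. A sketch with a hypothetical policy id and repository name; the repository must already be registered, for example via the snapshot client that follows:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

# put_lifecycle() PUTs /_slm/policy/{policy_id}.
es.slm.put_lifecycle(
    "nightly-snapshots",
    body={
        "schedule": "0 30 1 * * ?",            # cron: 01:30 every night
        "name": "<nightly-{now/d}>",           # date-math snapshot names
        "repository": "my_repository",         # assumed to exist already
        "config": {"indices": ["logs-*"]},
        "retention": {"expire_after": "30d"},
    },
)

es.slm.execute_lifecycle("nightly-snapshots")  # snapshot now, off-schedule
es.slm.execute_retention()                     # prune expired snapshots now
print(es.slm.get_stats())                      # global and per-policy counters
```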
+ ``_ + + :arg repository: A repository name + :arg snapshot: A snapshot name + :arg body: The snapshot definition + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg wait_for_completion: Should this request wait until the + operation has completed before returning + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_snapshot", repository, snapshot), + params=params, + headers=headers, + body=body, + ) + + @query_params("master_timeout") + def delete(self, repository, snapshot, params=None, headers=None): + """ + Deletes a snapshot. + ``_ + + :arg repository: A repository name + :arg snapshot: A snapshot name + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", + _make_path("_snapshot", repository, snapshot), + params=params, + headers=headers, + ) + + @query_params("ignore_unavailable", "master_timeout", "verbose") + def get(self, repository, snapshot, params=None, headers=None): + """ + Returns information about a snapshot. + ``_ + + :arg repository: A repository name + :arg snapshot: A comma-separated list of snapshot names + :arg ignore_unavailable: Whether to ignore unavailable + snapshots, defaults to false which means a SnapshotMissingException is + thrown + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg verbose: Whether to show verbose snapshot info or only show + the basic info found in the repository index blob + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "GET", + _make_path("_snapshot", repository, snapshot), + params=params, + headers=headers, + ) + + @query_params("master_timeout", "timeout") + def delete_repository(self, repository, params=None, headers=None): + """ + Deletes a repository. + ``_ + + :arg repository: A comma-separated list of repository names + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + """ + if repository in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'repository'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_snapshot", repository), + params=params, + headers=headers, + ) + + @query_params("local", "master_timeout") + def get_repository(self, repository=None, params=None, headers=None): + """ + Returns information about a repository. + ``_ + + :arg repository: A comma-separated list of repository names + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + return self.transport.perform_request( + "GET", _make_path("_snapshot", repository), params=params, headers=headers + ) + + @query_params("master_timeout", "timeout", "verify") + def create_repository(self, repository, body, params=None, headers=None): + """ + Creates a repository. 
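Given an existing repository, the snapshot read/write pair above works like this; repository and snapshot names are placeholders:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

# create() PUTs /_snapshot/{repository}/{snapshot}; wait_for_completion
# blocks until the snapshot finishes instead of returning immediately.
es.snapshot.create(
    repository="my_repository",                # assumed to exist
    snapshot="snap-2022-03-24",
    body={"indices": "logs-*", "include_global_state": False},
    wait_for_completion=True,
)

# get() accepts a comma-separated list of snapshot names.
print(es.snapshot.get(repository="my_repository", snapshot="snap-2022-03-24"))
```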
+ ``_ + + :arg repository: A repository name + :arg body: The repository definition + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + :arg verify: Whether to verify the repository after creation + """ + for param in (repository, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_snapshot", repository), + params=params, + headers=headers, + body=body, + ) + + @query_params("master_timeout", "wait_for_completion") + def restore(self, repository, snapshot, body=None, params=None, headers=None): + """ + Restores a snapshot. + ``_ + + :arg repository: A repository name + :arg snapshot: A snapshot name + :arg body: Details of what to restore + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg wait_for_completion: Should this request wait until the + operation has completed before returning + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path("_snapshot", repository, snapshot, "_restore"), + params=params, + headers=headers, + body=body, + ) + + @query_params("ignore_unavailable", "master_timeout") + def status(self, repository=None, snapshot=None, params=None, headers=None): + """ + Returns information about the status of a snapshot. + ``_ + + :arg repository: A repository name + :arg snapshot: A comma-separated list of snapshot names + :arg ignore_unavailable: Whether to ignore unavailable + snapshots, defaults to false which means a SnapshotMissingException is + thrown + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + return self.transport.perform_request( + "GET", + _make_path("_snapshot", repository, snapshot, "_status"), + params=params, + headers=headers, + ) + + @query_params("master_timeout", "timeout") + def verify_repository(self, repository, params=None, headers=None): + """ + Verifies a repository. + ``_ + + :arg repository: A repository name + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + """ + if repository in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'repository'.") + + return self.transport.perform_request( + "POST", + _make_path("_snapshot", repository, "_verify"), + params=params, + headers=headers, + ) + + @query_params("master_timeout", "timeout") + def cleanup_repository(self, repository, params=None, headers=None): + """ + Removes stale data from repository. 
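A typical repository lifecycle built from the methods above: register, verify, then restore. A sketch under the assumption of a shared-filesystem repository; the mount path and rename pattern are illustrative, and `path.repo` must whitelist the location on every node:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

# create_repository() PUTs /_snapshot/{repository}; verify is the query
# param declared on the method above.
es.snapshot.create_repository(
    repository="my_repository",
    body={"type": "fs", "settings": {"location": "/mnt/es_backups"}},
    verify=True,
)
es.snapshot.verify_repository(repository="my_repository")

# restore() POSTs /_snapshot/{repo}/{snapshot}/_restore; renaming avoids
# colliding with the live indices.
es.snapshot.restore(
    repository="my_repository",
    snapshot="snap-2022-03-24",
    body={
        "indices": "logs-*",
        "rename_pattern": "logs-(.+)",
        "rename_replacement": "restored-logs-$1",
    },
    wait_for_completion=True,
)
```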
+ ``_ + + :arg repository: A repository name + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + """ + if repository in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'repository'.") + + return self.transport.perform_request( + "POST", + _make_path("_snapshot", repository, "_cleanup"), + params=params, + headers=headers, + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/sql.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/sql.py new file mode 100755 index 000000000..eb79e8b47 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/sql.py @@ -0,0 +1,52 @@ +from .utils import NamespacedClient, query_params, SKIP_IN_PATH + + +class SqlClient(NamespacedClient): + @query_params() + def clear_cursor(self, body, params=None, headers=None): + """ + Clears the SQL cursor + ``_ + + :arg body: Specify the cursor value in the `cursor` element to + clean the cursor. + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_sql/close", params=params, headers=headers, body=body + ) + + @query_params("format") + def query(self, body, params=None, headers=None): + """ + Executes a SQL request + ``_ + + :arg body: Use the `query` element to start a query. Use the + `cursor` element to continue a query. + :arg format: a short version of the Accept header, e.g. json, + yaml + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_sql", params=params, headers=headers, body=body + ) + + @query_params() + def translate(self, body, params=None, headers=None): + """ + Translates SQL into Elasticsearch queries + ``_ + + :arg body: Specify the query in the `query` element. + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_sql/translate", params=params, headers=headers, body=body + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ssl.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ssl.py new file mode 100755 index 000000000..da6fa0b77 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/ssl.py @@ -0,0 +1,14 @@ +from .utils import NamespacedClient, query_params + + +class SslClient(NamespacedClient): + @query_params() + def certificates(self, params=None, headers=None): + """ + Retrieves information about the X.509 certificates used to encrypt + communications in the cluster. + ``_ + """ + return self.transport.perform_request( + "GET", "/_ssl/certificates", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/tasks.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/tasks.py new file mode 100755 index 000000000..89d8c6ef3 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/tasks.py @@ -0,0 +1,83 @@ +import warnings +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class TasksClient(NamespacedClient): + @query_params( + "actions", + "detailed", + "group_by", + "nodes", + "parent_task_id", + "timeout", + "wait_for_completion", + ) + def list(self, params=None, headers=None): + """ + Returns a list of tasks. + ``_ + + :arg actions: A comma-separated list of actions that should be + returned. 
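Returning to the SQL client above, the three endpoints cover run, inspect, and cleanup. A sketch with a hypothetical index pattern; note that `clear_cursor` only applies when a query actually returned a cursor:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

# query() POSTs /_sql; 'format' is the short form of the Accept header.
page = es.sql.query(
    body={"query": 'SELECT COUNT(*) FROM "logs-*"', "fetch_size": 100},
    format="json",
)
print(page["rows"])

# translate() shows the Query DSL the SQL statement compiles to.
print(es.sql.translate(body={"query": 'SELECT * FROM "logs-*" LIMIT 10'}))

# Release server-side paging state early via /_sql/close if present.
cursor = page.get("cursor")
if cursor:
    es.sql.clear_cursor(body={"cursor": cursor})
```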
Leave empty to return all. + :arg detailed: Return detailed task information (default: false) + :arg group_by: Group tasks by nodes or parent/child + relationships Valid choices: nodes, parents, none Default: nodes + :arg nodes: A comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all nodes + :arg parent_task_id: Return tasks with specified parent task id + (node_id:task_number). Set to -1 to return all. + :arg timeout: Explicit operation timeout + :arg wait_for_completion: Wait for the matching tasks to + complete (default: false) + """ + return self.transport.perform_request( + "GET", "/_tasks", params=params, headers=headers + ) + + @query_params("actions", "nodes", "parent_task_id") + def cancel(self, task_id=None, params=None, headers=None): + """ + Cancels a task, if it can be cancelled through an API. + ``_ + + :arg task_id: Cancel the task with specified task id + (node_id:task_number) + :arg actions: A comma-separated list of actions that should be + cancelled. Leave empty to cancel all. + :arg nodes: A comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all nodes + :arg parent_task_id: Cancel tasks with specified parent task id + (node_id:task_number). Set to -1 to cancel all. + """ + return self.transport.perform_request( + "POST", + _make_path("_tasks", task_id, "_cancel"), + params=params, + headers=headers, + ) + + @query_params("timeout", "wait_for_completion") + def get(self, task_id=None, params=None, headers=None): + """ + Returns information about a task. + ``_ + + :arg task_id: Return the task with specified id + (node_id:task_number) + :arg timeout: Explicit operation timeout + :arg wait_for_completion: Wait for the matching tasks to + complete (default: false) + """ + if task_id in SKIP_IN_PATH: + warnings.warn( + "Calling client.tasks.get() without a task_id is deprecated " + "and will be removed in v8.0. Use client.tasks.list() instead.", + category=DeprecationWarning, + stacklevel=3, + ) + + return self.transport.perform_request( + "GET", _make_path("_tasks", task_id), params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/transform.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/transform.py new file mode 100755 index 000000000..92f472f8b --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/transform.py @@ -0,0 +1,204 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class TransformClient(NamespacedClient): + @query_params("force") + def delete_transform(self, transform_id, params=None, headers=None): + """ + Deletes an existing transform. + ``_ + + :arg transform_id: The id of the transform to delete + :arg force: When `true`, the transform is deleted regardless of + its current state. The default value is `false`, meaning that the + transform must be `stopped` before it can be deleted. + """ + if transform_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'transform_id'." 
+ ) + + return self.transport.perform_request( + "DELETE", + _make_path("_transform", transform_id), + params=params, + headers=headers, + ) + + @query_params("allow_no_match", "from_", "size") + def get_transform(self, transform_id=None, params=None, headers=None): + """ + Retrieves configuration information for transforms. + ``_ + + :arg transform_id: The id or comma delimited list of id + expressions of the transforms to get, '_all' or '*' implies get all + transforms + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no transforms. (This includes `_all` string or when no + transforms have been specified) + :arg from_: skips a number of transform configs, defaults to 0 + :arg size: specifies a max number of transforms to get, defaults + to 100 + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_transform", transform_id), + params=params, + headers=headers, + ) + + @query_params("allow_no_match", "from_", "size") + def get_transform_stats(self, transform_id, params=None, headers=None): + """ + Retrieves usage information for transforms. + ``_ + + :arg transform_id: The id of the transform for which to get + stats. '_all' or '*' implies all transforms + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no transforms. (This includes `_all` string or when no + transforms have been specified) + :arg from_: skips a number of transform stats, defaults to 0 + :arg size: specifies a max number of transform stats to get, + defaults to 100 + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if transform_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'transform_id'." + ) + + return self.transport.perform_request( + "GET", + _make_path("_transform", transform_id, "_stats"), + params=params, + headers=headers, + ) + + @query_params() + def preview_transform(self, body, params=None, headers=None): + """ + Previews a transform. + ``_ + + :arg body: The definition for the transform to preview + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_transform/_preview", params=params, headers=headers, body=body + ) + + @query_params("defer_validation") + def put_transform(self, transform_id, body, params=None, headers=None): + """ + Instantiates a transform. + ``_ + + :arg transform_id: The id of the new transform. + :arg body: The transform definition + :arg defer_validation: If validations should be deferred until + transform starts, defaults to false. + """ + for param in (transform_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_transform", transform_id), + params=params, + headers=headers, + body=body, + ) + + @query_params("timeout") + def start_transform(self, transform_id, params=None, headers=None): + """ + Starts one or more transforms. + ``_ + + :arg transform_id: The id of the transform to start + :arg timeout: Controls the time to wait for the transform to + start + """ + if transform_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'transform_id'." 
+            )
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_transform", transform_id, "_start"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "allow_no_match",
+        "force",
+        "timeout",
+        "wait_for_checkpoint",
+        "wait_for_completion",
+    )
+    def stop_transform(self, transform_id, params=None, headers=None):
+        """
+        Stops one or more transforms.
+        ``_
+
+        :arg transform_id: The id of the transform to stop
+        :arg allow_no_match: Whether to ignore if a wildcard expression
+            matches no transforms. (This includes `_all` string or when no
+            transforms have been specified)
+        :arg force: Whether to force stop a failed transform or not.
+            Defaults to false
+        :arg timeout: Controls the time to wait until the transform has
+            stopped. Defaults to 30 seconds
+        :arg wait_for_checkpoint: Whether to wait for the transform to
+            reach a checkpoint before stopping. Defaults to false
+        :arg wait_for_completion: Whether to wait for the transform to
+            fully stop before returning or not. Defaults to false
+        """
+        if transform_id in SKIP_IN_PATH:
+            raise ValueError(
+                "Empty value passed for a required argument 'transform_id'."
+            )
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_transform", transform_id, "_stop"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("defer_validation")
+    def update_transform(self, transform_id, body, params=None, headers=None):
+        """
+        Updates certain properties of a transform.
+        ``_
+
+        :arg transform_id: The id of the transform.
+        :arg body: The update transform definition
+        :arg defer_validation: If validations should be deferred until the
+            transform starts, defaults to false.
+        """
+        for param in (transform_id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_transform", transform_id, "_update"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/utils.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/utils.py
new file mode 100755
index 000000000..3baa7666b
--- /dev/null
+++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/utils.py
@@ -0,0 +1,125 @@
+from __future__ import unicode_literals
+
+import weakref
+from datetime import date, datetime
+from functools import wraps
+from ..compat import string_types, quote, PY2
+
+# parts of URL to be omitted
+SKIP_IN_PATH = (None, "", b"", [], ())
+
+
+def _escape(value):
+    """
+    Escape a single value of a URL string or a query parameter. If it is a list
+    or tuple, turn it into a comma-separated string first.
+    """
+
+    # make sequences into comma-separated strings
+    if isinstance(value, (list, tuple)):
+        value = ",".join(value)
+
+    # dates and datetimes into isoformat
+    elif isinstance(value, (date, datetime)):
+        value = value.isoformat()
+
+    # make bools into true/false strings
+    elif isinstance(value, bool):
+        value = str(value).lower()
+
+    # don't decode bytestrings
+    elif isinstance(value, bytes):
+        return value
+
+    # encode strings to utf-8
+    if isinstance(value, string_types):
+        if PY2 and isinstance(value, unicode):  # noqa: F821
+            return value.encode("utf-8")
+        if not PY2 and isinstance(value, str):
+            return value.encode("utf-8")
+
+    return str(value)
+
+
+def _make_path(*parts):
+    """
+    Create a URL string from parts, omit all `None` values and empty strings.
+    Convert lists and tuples to comma-separated values.
+    """
+    # TODO: maybe only allow some parts to be lists/tuples ?
+ return "/" + "/".join( + # preserve ',' and '*' in url for nicer URLs in logs + quote(_escape(p), b",*") + for p in parts + if p not in SKIP_IN_PATH + ) + + +# parameters that apply to all methods +GLOBAL_PARAMS = ("pretty", "human", "error_trace", "format", "filter_path") + + +def query_params(*es_query_params): + """ + Decorator that pops all accepted parameters from method's kwargs and puts + them in the params argument. + """ + + def _wrapper(func): + @wraps(func) + def _wrapped(*args, **kwargs): + params = {} + headers = {} + if "params" in kwargs: + params = kwargs.pop("params").copy() + if "headers" in kwargs: + headers = { + k.lower(): v for k, v in (kwargs.pop("headers") or {}).items() + } + if "opaque_id" in kwargs: + headers["x-opaque-id"] = kwargs.pop("opaque_id") + + for p in es_query_params + GLOBAL_PARAMS: + if p in kwargs: + v = kwargs.pop(p) + if v is not None: + params[p] = _escape(v) + + # don't treat ignore, request_timeout, and opaque_id as other params to avoid escaping + for p in ("ignore", "request_timeout"): + if p in kwargs: + params[p] = kwargs.pop(p) + return func(*args, params=params, headers=headers, **kwargs) + + return _wrapped + + return _wrapper + + +def _bulk_body(serializer, body): + # if not passed in a string, serialize items and join by newline + if not isinstance(body, string_types): + body = "\n".join(map(serializer.dumps, body)) + + # bulk body must end with a newline + if not body.endswith("\n"): + body += "\n" + + return body + + +class NamespacedClient(object): + def __init__(self, client): + self.client = client + + @property + def transport(self): + return self.client.transport + + +class AddonClient(NamespacedClient): + @classmethod + def infect_client(cls, client): + addon = cls(weakref.proxy(client)) + setattr(client, cls.namespace, addon) + return client diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/watcher.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/watcher.py new file mode 100755 index 000000000..3a3450dd0 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/watcher.py @@ -0,0 +1,176 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class WatcherClient(NamespacedClient): + @query_params() + def ack_watch(self, watch_id, action_id=None, params=None, headers=None): + """ + Acknowledges a watch, manually throttling the execution of the watch's actions. + ``_ + + :arg watch_id: Watch ID + :arg action_id: A comma-separated list of the action ids to be + acked + """ + if watch_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'watch_id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_watcher", "watch", watch_id, "_ack", action_id), + params=params, + headers=headers, + ) + + @query_params() + def activate_watch(self, watch_id, params=None, headers=None): + """ + Activates a currently inactive watch. + ``_ + + :arg watch_id: Watch ID + """ + if watch_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'watch_id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_watcher", "watch", watch_id, "_activate"), + params=params, + headers=headers, + ) + + @query_params() + def deactivate_watch(self, watch_id, params=None, headers=None): + """ + Deactivates a currently active watch. 
+ ``_ + + :arg watch_id: Watch ID + """ + if watch_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'watch_id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_watcher", "watch", watch_id, "_deactivate"), + params=params, + headers=headers, + ) + + @query_params() + def delete_watch(self, id, params=None, headers=None): + """ + Removes a watch from Watcher. + ``_ + + :arg id: Watch ID + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_watcher", "watch", id), + params=params, + headers=headers, + ) + + @query_params("debug") + def execute_watch(self, body=None, id=None, params=None, headers=None): + """ + Forces the execution of a stored watch. + ``_ + + :arg body: Execution control + :arg id: Watch ID + :arg debug: indicates whether the watch should execute in debug + mode + """ + return self.transport.perform_request( + "PUT", + _make_path("_watcher", "watch", id, "_execute"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def get_watch(self, id, params=None, headers=None): + """ + Retrieves a watch by its ID. + ``_ + + :arg id: Watch ID + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "GET", _make_path("_watcher", "watch", id), params=params, headers=headers + ) + + @query_params("active", "if_primary_term", "if_seq_no", "version") + def put_watch(self, id, body=None, params=None, headers=None): + """ + Creates a new watch, or updates an existing one. + ``_ + + :arg id: Watch ID + :arg body: The watch + :arg active: Specify whether the watch is in/active by default + :arg if_primary_term: only update the watch if the last + operation that has changed the watch has the specified primary term + :arg if_seq_no: only update the watch if the last operation that + has changed the watch has the specified sequence number + :arg version: Explicit version number for concurrency control + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_watcher", "watch", id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def start(self, params=None, headers=None): + """ + Starts Watcher if it is not already running. + ``_ + """ + return self.transport.perform_request( + "POST", "/_watcher/_start", params=params, headers=headers + ) + + @query_params("emit_stacktraces") + def stats(self, metric=None, params=None, headers=None): + """ + Retrieves the current Watcher metrics. + ``_ + + :arg metric: Controls what additional stat metrics should be + include in the response Valid choices: _all, queued_watches, + current_watches, pending_watches + :arg emit_stacktraces: Emits stack traces of currently running + watches + """ + return self.transport.perform_request( + "GET", + _make_path("_watcher", "stats", metric), + params=params, + headers=headers, + ) + + @query_params() + def stop(self, params=None, headers=None): + """ + Stops Watcher if it is running. 
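The watcher methods above are most useful in pairs: register a watch, then force a run to test it. A sketch with a hypothetical watch id and a minimal cluster-health watch body, unrelated to this demo's CLS pipeline:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # hypothetical endpoint

# put_watch() PUTs /_watcher/watch/{id}; 'active' is a declared query param.
es.watcher.put_watch(
    "cluster_health_watch",
    body={
        "trigger": {"schedule": {"interval": "10m"}},
        "input": {
            "http": {
                "request": {"host": "localhost", "port": 9200, "path": "/_cluster/health"}
            }
        },
        "condition": {"compare": {"ctx.payload.status": {"eq": "red"}}},
        "actions": {"log_error": {"logging": {"text": "Cluster health is RED"}}},
    },
    active=True,
)

# execute_watch() forces one run; debug mode returns extra execution detail.
es.watcher.execute_watch(id="cluster_health_watch", debug=True)
```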
+ ``_ + """ + return self.transport.perform_request( + "POST", "/_watcher/_stop", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/client/xpack.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/xpack.py new file mode 100755 index 000000000..2e1a85b45 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/client/xpack.py @@ -0,0 +1,32 @@ +from .utils import NamespacedClient, query_params + + +class XPackClient(NamespacedClient): + def __getattr__(self, attr_name): + return getattr(self.client, attr_name) + + # AUTO-GENERATED-API-DEFINITIONS # + @query_params("categories") + def info(self, params=None, headers=None): + """ + Retrieves information about the installed X-Pack features. + ``_ + + :arg categories: Comma-separated list of info categories. Can be + any of: build, license, features + """ + return self.transport.perform_request( + "GET", "/_xpack", params=params, headers=headers + ) + + @query_params("master_timeout") + def usage(self, params=None, headers=None): + """ + Retrieves usage information about the installed X-Pack features. + ``_ + + :arg master_timeout: Specify timeout for watch write operation + """ + return self.transport.perform_request( + "GET", "/_xpack/usage", params=params, headers=headers + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/compat.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/compat.py new file mode 100755 index 000000000..aba63ea73 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/compat.py @@ -0,0 +1,27 @@ +import sys + +PY2 = sys.version_info[0] == 2 + +if PY2: + string_types = (basestring,) # noqa: F821 + from urllib import quote_plus, quote, urlencode, unquote + from urlparse import urlparse + from itertools import imap as map + from Queue import Queue +else: + string_types = str, bytes + from urllib.parse import quote, quote_plus, urlencode, urlparse, unquote + + map = map + from queue import Queue + +__all__ = [ + "string_types", + "quote_plus", + "quote", + "urlencode", + "unquote", + "urlparse", + "map", + "Queue", +] diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__init__.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__init__.py new file mode 100755 index 000000000..e56e541d5 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__init__.py @@ -0,0 +1,10 @@ +from .base import Connection +from .http_requests import RequestsHttpConnection +from .http_urllib3 import Urllib3HttpConnection, create_ssl_context + +__all__ = [ + "Connection", + "RequestsHttpConnection", + "Urllib3HttpConnection", + "create_ssl_context", +] diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__pycache__/__init__.cpython-37.pyc b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82fe50d2077cd87617ca64e48529ce277bb6c15c GIT binary patch literal 403 zcmY*VO-lnY5KZ=@-Igtgp#Py4bMWFpL~I35qSjt=3F#)ZE7_!GvS9HS`Iq+U$-m&q z$y!k-yDm$=~!K@mj_X(*>O^28)f%F~{wCTnt@ zHv>MvJX5(EsNp)}Lscxubo7C~WJPDH3YZ`Vf#l(x}&8xre2-? 
z7rMjBYZorE^584t9PSqsM>6ss?Yr=nvDaB=$e&LWW8Ft%3HI zl{eN!5X*id(z`+=BA;9B^+nR>A{yRK;%>M(5S-<&-Hig7h8@WR(b>Ag`+|3Z6w5pe zL>$JO`OF9F>-YV;k8a<+zrOn6+Q+n^zrUAEh41In{NNg+Zolt%^Iky zY=$*aJ8YI6LtSIX*&J$@onR+XPq9<%H0nCLz|OFtnLcTGq4d%%!2`A^gDzik)xevPOn&d^QL?#-W0yl#0T)J-m~c$0}gx_0&IfjJoEOBt=G z;doH=r2P(1G3L_W&2&9EE)xxoCpKyx6W2B?IP% zY{gs0t!~qsw|d9xilnzM7^-o}gS>stAJ~F1^?w=?7rD z+TmjkIZ3y`{m+AF!0ARr%7sUZAF4r%t8Bp|CHA%;8(gSGNUZNch)Fq>07ns|L`f3K z8{^&v4nvD#L}5jW-SgT+2-fc9+P7qaJIW5@VHRJ~=AYs#f2V6&=7@h9ZfDLeEbyNG z*ceZZomAg7&|)%U4{f^^)#x|($SzxdVVu!0u0Eb-dRE`DMU>TdD{8ih)~RtLgWcY7 z(B5E`9aBW9oi(28V*~ctv$V9DI$Z-c`fmywoz|HBT>nxpZFDu8&gxy`q&C)>Gd8oT zTI;uXuEzCp<=?W}*ht+BR`|*0PmX6YJDWL!zU`@O zX1BhlUDrPQuZQY=cbe?fo}ur*O=sF}V^32pHSAdXMi*DmHj}}=(+aF{UCXLyz0PLT zdlT>OZj0lr`D$9Rs7Du;b&VI=_jt&C<+%%;xZXEW>*K@ytLu$4+F; zOsCu8?F81HEBF3~OtZ9Y*hp$SslB5a)Nf(se`BXIYxg8OwWpub##Uxw=BccL@-)+6 zAy2P;r>~4EgS5N&?sxhn0TZdU&9~ovYjkO0U+5Re62(ahwJh8XDP}K7Q_e<}dAT$% z*Tet4sA+k%5J)+yL*~g4E~djiA2ox1KMD&kw7e~oc;v!hh{a$NL)z290Nzf7Bju$= zbyC7B(CR2i`DiBSba+2qO`iJYn@@N3)m_Vupdb1>eCQX{ z@}cFqi3xeNG^6?SA={KYWTDB5=N6e|+#T<@U@F3X?!GXhd9^fxt=Zg?JnD)C98kPY z1=ynrfGSPAK?R*$TtbmM`&`be<+*Zu=P5jh%yqwA16{x*pT+Gem;PW7vi4Njrp{vG zB7)4s-)rJcRNwu48J1MYj`+tbVv*^I24>Ew+SW(MVY*AGu(ZEx2R2WAs3zUGjjue0 z0@h61*0!lMNF$+3#@fia4oURp<&Bn^*L=T2Km&jZw;=xczT!wr7k_{i6upY#8eVCT z09^C^KchVe^l3*ARrGgMAG)cw@KuK*zNT8Hr#g(>+0>u|7RG|iC5)4~hj%J>jN-7D zAH;IdCpL2C!B+%On4t&#>g*`jJ1-SvnU?^cZYcNNmLTtt*Vduhg@WMV(1e-qG;wo-yWEN52vtxJ$MT+N#+Ot

#7AluFTh91Bve22{Kf^O|994}N< z=GC(6sBp~qig^D1cHiLe@+1na0@G(%x?6E!0_#=-X3^3|XAjEC;i(g0Atjus$PK&z zOTzX+I1Ewk4VKjPLX-BV)xQ zb17kn$S>lqAR+t(`BeCtAWa0E`ohS5Jh*!G+B;Awi%>*fVS`>@k^>GH=mFyxA)FGg z;C$j;6#M4LVnG@lB~zUuk2D?nwA4Rhw5+4h38xMdmG_ z9$i}ez*_*p6zn74u;LwseGA@b_Tne=@>B2f{L))p*>W$=17)})!@by=%59Zc5P3b6 zcVkjuhJYroma%Pat*+hs5LgINbOeAQeu+aWM3Y-#3_$t>?|J3U-CGadhh*@L!Dg#k z*iyU4fiM}+Q-FB-9*vuV0;dBB6AJX_l$iiLu>r&l_~WRwxIT{Is{{Tk=F1BxG=L7v zJP-Ilpu&JRGw?TO5)7dyLm!W_R8+F6k^}>x7p(Li!W8sckQpcN9VKyn@ z=$!YngmN8n0F*6dcMy3u@2`5u2}DbtS4IWU#?3epeDoUR0RNv9qyy024=`60H$NR& zH_$(_uA^LQ+2S-Vls9eyD2E$^w8RQFu@l%JM5oxH;xiO2M{!CJ4RQzPhrg%az====f3ykv5Rw3w~*D?}WdC zqObk`oHW;#ZvtRB{8z7il)3;Ivd4vA<=Yzx3w3%6$9#e!B)l0 z32*Zn_CvJy68&o)WKuT>qyND|k^&fQx`6bB#@S$X9e!?~;dNsUVZi8>?_oEs)G=ul ze8?dav{l;p*LcW`vB`jZhVN#ExIwl(Lu!MN(5}H?jloMZ1qMp3=jewvny~UWGU(+Q zZEQkwuR(Kxgp?B$x;BQr9yQbIt^;YQ5cdcfVTa0}Rvu}kUf0N_rm)wA&OSH4#0Y3> zWpo8RCImuu6@j=2>3Kpi1=W@N1^`;}*76Fp4EEDfA^6e*cvKiWGOGYrPd(MSpGQRg zlM-2gQAjyc_|lpZpMU`3Q!2>TiFZ)s77IJ+3;(KkFNV}jo=^~whD4~?hM3xRu#->D zw>_=)^GnFmo(ULJ(RL6i`N~c7AJyu531rmNIs{jeMbV(d#9!bT74}g8cOFRQu(vL7 zjGB2Unjl^){xl_`H z#vp*Za6t6FCoM36S#&zeFsKv$1P?_#mm7T*T=XJ4lPCTs;CYE^Vc~=thxPCd97t*ySazcs!(N z)C;_H(1-NnoPk&5fxIFO#LDnsa+gu{i>u?6FWwwOX~FFc6r}|mRd4n~x}9S0wA-o` zno&_}=T?Lt0CKxXkBL(>nUHte-MAJ^a!*1fZj`BPnT572VoW9^=Ta$=gE@j;M^VpdQtC3k%v0qP(^RwA$aK8629jv}x6 zK1({tM!bb_;)hiH5fv0wi&ZKfQ1OHc^4{WeD!!oN&#CaJptB#ikMh{$tX1}aOp3gU z!kM)!{P1?@&wbA^oeI9TbIx&{)6O~Sch5Vsj;Ut1&WkRsM5GnBsCW-WZU_K4ga9d# zU#Y101=`!L40~Wa;e2J)@+tgqhFIgSvXAb>f=Xq;REkdpXN6HCEUc71uOUYNS%F@Z z)3b{g1Y$vq5Dh6nI4RV3U?kgT3$Td9Uqrv)41qNFf1(qNPFo`z9&AF2^D6!1fMkI9 zId#~`9YD*}O_d>w^@c%}HO>kcBAI>1iDIBnMr*J{8{BH?AY=Klfl C`R%p< literal 0 HcmV?d00001 diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__pycache__/http_requests.cpython-37.pyc b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__pycache__/http_requests.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e81f5923551bc7f01f591ad288d3bbaaaa12efa1 GIT binary patch literal 4880 zcmb7I&2QYs6(_meUGAq=vLY*vldwh{M?owd8l;7gCRS{@QJTtbq5V_TEE*9&_fQDA1EH{R499?+v*hF5*ME;BYweX5PoV z_j`}~UaM8ta2@{Uj`#HyP5U>!OfCzZTe!uSXqd+INOSV1?&!mg8ZjJ$8BXa_EnYRX zz&xq*pXwU@Sjj1UuEoY@8Z%irD)%Z*1)~*KjjBD%v3fP9)~h@9Uc+gqaVu)}T28Ci zcG|l3bB)zl{jtXCLFKIEEHHgfTWdUl9NO;w-6uMpYkF3{oy2kA%P@(b7&K&N3$vkZ z&J&(3@AUdS=?6R-VyYV+4tT(_#TnUt*b9<@%&fh=57m-2BdZN~6vTeQf~+Yd5932u z4*P-dNr1`KE;`~CI)UcsOoK6Wr^FV%(z-gS`ncwptfN%XopMlN4c2_Do#{@MwOISH z=2%~8rsmY3++w!!QSike5K_D^Wq&50_b*e^>Nc*~m^~1X$(Bcf{eTy%9@tTMz&$=( zuPf?Jk007c*vj<=@@Uge`eY|BQe!ra*5 z%yTo?3Aa644*F5BZg0gy`=J*N0{b+KBKsh)`<@U1!@e@HCvwrw4~X4^^_g4)5x7D` zn@}XM(XkwJeVze81S$=bp3!!v1y+k1xjEr%O`9cp-q!vH}oOB zn5_g4R~#j$?y1M)O`Ezlb;*p_C!Xw}wzflIvru>kQDFdOOW&I?o|>}n!HQsid&@o; z#Egvc-q2=2*Be9Z#pe1vY0LKMI3bDsG#wY0iTVyBkJ<4HHQ( zRFczJpN9{jt9=p-ZSW&2PT9=a;te`Fdbg7}8j^8yA1ZiZ^~_kavHy?#WYa1Ci5umN zNjj2}v=H`db6VzxC=|;DB-KdVwV7B)fyV-lgZJQg{y@m2mpgMYK_>wx^cajV^S`JO~7K`53NuI12kPXg-86m=XI?GGK1VHtl^#a+}`lojZ1ygXwnU2^sooaGk>P zVGz@a!J~%6x^QCERSUzIe8CSye#jJ0_n;uRj}jp#@I>&|n|6+8><0pQrgYNx06|LYy+HXAA|Vg)Fjnz- zJJFljl3tiW~c;!<~TC$dEXU1{8tEGBsq-I)A8)-9brEO;X$x0XgV0;5wtZWF$ z=!yMU>1{17G3}(r|0bE+tr{!;TVL+8)uttbTH!S3bFR~RDvvtWBqutJ zIW%|bvjn2dp>*a;*q&daW|>WPT~)Gz&EjRpC`vC`ozZVNbD!udmBLx$0G2!f3#76FHUAdsWLgR_5P4w1>>8z z#YbqgB~v%_idol}^d+N-{`{|^TcwK8G%UTX*YWGI!~XZCFM{ZyL)O zH}uiV&xo7p;je+MPjxk2WD9bR+qlK|KsUsxmKv9}v5{8$olqNB(-ME30j;E-mipI1 zl#YZTm^a51)dv3{HF0a_rPRF8$0ox{!YpZ?n-|)+CdW1pq(#CN;q`Drzi}JY1%4xa(E#?}bK5nJ0^Jdy4>=?dE^vA8V(bdz|H~P4pb4hEf 
z+IS&tAGgOHdnNfw>n?_vYkikY->LU{ZgR^-~WkFP=> zWDYgkxu_BbpTAhV(JJlyC8hU-l6L8&3yPBZkR|POOTgsT?x=iSyn7viNj1r+12>j% z_bE8y+osY(y_>KCVG!w=$vo+0RhfUnP!t7^2vXtvqtfoimQ&r|`Q`S#kN2}`5%`=u z@X0^Wgm1v()?Vg6LZ8>DQOMMGiT|`r%1qjvU&g#sExenr(%S++3FS)Q76Im50o`HX z2Jyp?Co#pUD*`DM_)yUC4K!J0(1*|g;ZDr3d+JJAha`pc1=8&6sj4jC8`d_+qkcpSB<)P6}U&J z2NYDT2{dfajVN?2jb42QCjO6bKD<#~^rTy%BAqbMLp!GLe|>k&P`;-Oyr$Mw>gY-XF z!sp+dHm-@ZSy>T1L*5U->hHL7dCk$)XULoH(dJFi?xIHC-z~1SGCIltyK8l-Gh(k7 zxNcT=-Cn{55%rs{`^CVEikYhGvcyLzLg|q2pvkQKKN2}q_TwZ})+#cxA4Cy|r*?)S zuhByCF;(0=hmN4=uvSbH_Ik#z{dRq)X{r@#9sVY*zD5mU3tvH#HSk{z>dO3&8znx4$zt4?#2PiLae{35g7&aLqd`Ievx#DYf~%J<>($mpw; GPU*i9I&PW( literal 0 HcmV?d00001 diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__pycache__/http_urllib3.cpython-37.pyc b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/__pycache__/http_urllib3.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4cebf76e9af3fadd6201597ea2408a17156c5ed GIT binary patch literal 6554 zcmb7I&2!vFb_Xz+84NgI{FE&3I*1)bomMhsX-fU!wid1$eg`_MmsuBcDl7kMTtz43SB2}raRE~SbH7DJ2Ol{?m-)mroL$43XpwQFk zkM~}`e((2Q_ot19t>Jg{*AM(}Zfn|q(#P~?q4KAA#lN8lv_OwEH~-gNU40v_fqx?^ z4NTV@l--I>{iUcnuv|;k^{D370y8K__F%zXKwBlKM)g6%Z48=jbI@{Im{(TqR@5FW zx{JETmM%4SS(Ph+?yink6t^{Zjjc^?yw=>+=UTjCYV78z&HqW)==rf08((TcEwI1V z+_(Nr3l@U<*IH0#3zsGLR-o@`oyHriL)+;XS$miH!G1Vk$w+QihEh1EFMNI^oG^CeF>{1JV9vmYhPdMk=RnP2UcmbPD3V@5 z>R?030L_=c#a;XEQRuGZ)ZJ1~zWis6H=&1LKkGgdjEnBrk3|w69r|JSLDCxySS&^N zDNjzYIikB2iTz}IugfA|$gn4v&wIyR@GthROA7FycP#fF6p$?yLO`IYn7>WBH&JL- zUFWn3v+beAWh(F!E()fR55WF)w-gvb36IH4R#r9oqUYLIfBC0cUnl$jy5?4bhO$-N zt>%{MT0hlH&8=aho7w8KT+T<3d26-={}17jN|6t8_0kR(%X4~Iw{S*+_MbN9pjhgG zQ|vFSB2Yb~2nXgWlU@>SY86^o|(6>&zzp(MKPAFgFL{c|*?n;mb`Y8IpJTk%LA^MvYEC;m)_41mu57^{$>TZRN-fj|GGIP{Dk8BX%Vw^x(m`HkCq9QA?t&ncKCn?;Py&5j(h= zcE)(vAA3E{95clodd)j>&qin14|@8BZYDTmidANIbw3Q^Mc8% z_4lF>n4lH_Yb?Yjh4;fqXo8eDO06o0rId`kKl+PD|fZ&S8nd)B(z{CIZmW=@X(LMyt~bj>BCVNhhfA( zv%=d*pJFr|!n6=zb8{uTz;4V2&uH%?@jU5)|5AkKkcx=Jqro9i1fY85{sL|p_Kx#y zJS1Bj()!`4Fmrhv=DtmyMr+#mKlb`gScvhJtpxPi^;bEXJcYrs8ChaM8XL_hS6Su z(-UZ&E5%KRlNk_x51@B)94sX~ilLTZP6tqzr#yHDfcKw1c0MDNKx}{vQ-ivS42Qm8 zfABHvloGn&H$%c4^GbOvPis@R=WtAWEK4-T89;i5j&2IJo0H+TVX#Xn*E8 z4}nOM`#pknAIq5T1JW==>O&|9mO~?AK_NNkp4Dc7d*|UB%K3XMl*{0E1MfodioZqH zOPOb&2gWygs(FSiNi)?hjgvC6Q9U(MGi|2rbTM5@mxIzPD_!{q<7c|ArKLbSt??6C zNlUUSt-f(-@Gk;$QcFv(^@~!fofs3FXk%pg8~rur;A`PxHPkNF(iZi(0e4?cl%=kh{+k_k|YntMSRsne?tFLifNmtRc)`x~q zZs5I{HiFixQo2UEgx22*G$exUo&ApP+NX>SJwFQ1SjWmtf`ZJXoz1F6ijh?d2hAGS zTrjhGy+}(A{4o;Z=pEcZncr zDtHnJ7*NtmW zMYjw~Z|b(bVpMdqWa<_kGk*+Iub8%ucEix?rd8Ao-LB9Wyv7pRDxg>?(-?Y+aj4m# zXrcA|ooC;5Qr(zZm6Eqd7fJM6!b_263$OTZL>eHUmKry-3nMM@cLGEgiRf^t_l-4b zL&Q+MmMl$5ueFO>YF^mme?h!hNXz_TT22=N;}Z?>$&lqsL%69isRR^l47BO0El%~K zjj*;lu|TJu))AGe!>TRSzL@k zvg+h!P{)dmi$$#St#py1QqVxpEfZHyr#iU43Yw=H;?%EV12GCcY2_4=kN*lYW^yC` zQ%~!46=Sr?+r<~n&?euTt}bs)n`#t|Z%n@b8j-tSLS$?0oR@ElAKezpU(d@U*E6$(`;Y&8`^mHY ztXg2qoBx2Ng?C|Top&-DkXT$faI(T2P)q_(eh1&Uo02@M2 z>5*^~y*d5v4eX&13S%32=xL)<9L(6a)pmLrkXiw+<Df6=&vQ5{z>Mt>l?;t_G_=iXyr@L+Fq`5|3KK!y`|P z-Xtko({)Yr5DO=OA&!15vw{(BOt3|n#r+qa(vHruR>; z9o^z73aw-VpV~km!YMqiw}# z0s~h{=eP4+xcBJVUE9XD`1fE#>PP>yM+(D8Xn3?X(J7}>m(0fb)e&U-CC-h#P~zeM ziI~8V8dhCg*v-;;+(#=r%F1d?M^_t^t3oM7q5WThlL_8}cx&eQ+utp9PNKZCv!e*? 
zbZm<7bT+04?|J!Y8}+*9{bb}vMNif9aAAY+au>w;Iu#UN_+L^%tg}WDf_e;=P=q0p z4&weX4N^i7{n{8cYeUfYaPsnrxEsWx@1#7IsPV4&3O1+Fo PYfzM>@1|ZUS@r(~4M&xP literal 0 HcmV?d00001 diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/base.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/base.py new file mode 100755 index 000000000..6da40fa66 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/base.py @@ -0,0 +1,297 @@ +import logging +import binascii +import gzip +import io +import re +from platform import python_version +import warnings + +try: + import simplejson as json +except ImportError: + import json + +from ..exceptions import ( + TransportError, + ImproperlyConfigured, + ElasticsearchDeprecationWarning, + HTTP_EXCEPTIONS, +) +from .. import __versionstr__ + +logger = logging.getLogger("elasticsearch") + +# create the elasticsearch.trace logger, but only set propagate to False if the +# logger hasn't already been configured +_tracer_already_configured = "elasticsearch.trace" in logging.Logger.manager.loggerDict +tracer = logging.getLogger("elasticsearch.trace") +if not _tracer_already_configured: + tracer.propagate = False + +_WARNING_RE = re.compile(r"\"([^\"]*)\"") + + +class Connection(object): + """ + Class responsible for maintaining a connection to an Elasticsearch node. It + holds persistent connection pool to it and it's main interface + (`perform_request`) is thread-safe. + + Also responsible for logging. + + :arg host: hostname of the node (default: localhost) + :arg port: port to use (integer, default: 9200) + :arg use_ssl: use ssl for the connection if `True` + :arg url_prefix: optional url prefix for elasticsearch + :arg timeout: default timeout in seconds (float, default: 10) + :arg http_compress: Use gzip compression + :arg cloud_id: The Cloud ID from ElasticCloud. Convenient way to connect to cloud instances. + :arg opaque_id: Send this value in the 'X-Opaque-Id' HTTP header + For tracing all requests made by this transport. + """ + + def __init__( + self, + host="localhost", + port=None, + use_ssl=False, + url_prefix="", + timeout=10, + headers=None, + http_compress=None, + cloud_id=None, + api_key=None, + **kwargs + ): + + if cloud_id: + try: + _, cloud_id = cloud_id.split(":") + parent_dn, es_uuid = ( + binascii.a2b_base64(cloud_id.encode("utf-8")) + .decode("utf-8") + .split("$")[:2] + ) + if ":" in parent_dn: + parent_dn, _, parent_port = parent_dn.rpartition(":") + if port is None and parent_port != "443": + port = int(parent_port) + except (ValueError, IndexError): + raise ImproperlyConfigured("'cloud_id' is not properly formatted") + + host = "%s.%s" % (es_uuid, parent_dn) + use_ssl = True + if http_compress is None: + http_compress = True + + # If cloud_id isn't set and port is default then use 9200. + # Cloud should use '443' by default via the 'https' scheme. 
+ elif port is None: + port = 9200 + + # Work-around if the implementing class doesn't + # define the headers property before calling super().__init__() + if not hasattr(self, "headers"): + self.headers = {} + + headers = headers or {} + for key in headers: + self.headers[key.lower()] = headers[key] + + self.headers.setdefault("content-type", "application/json") + self.headers.setdefault("user-agent", self._get_default_user_agent()) + + if api_key is not None: + self.headers["authorization"] = self._get_api_key_header_val(api_key) + + if http_compress: + self.headers["accept-encoding"] = "gzip,deflate" + + scheme = kwargs.get("scheme", "http") + if use_ssl or scheme == "https": + scheme = "https" + use_ssl = True + self.use_ssl = use_ssl + self.http_compress = http_compress or False + + self.hostname = host + self.port = port + self.host = "%s://%s" % (scheme, host) + if self.port is not None: + self.host += ":%s" % self.port + if url_prefix: + url_prefix = "/" + url_prefix.strip("/") + self.url_prefix = url_prefix + self.timeout = timeout + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self.host) + + def __eq__(self, other): + if not isinstance(other, Connection): + raise TypeError("Unsupported equality check for %s and %s" % (self, other)) + return self.__hash__() == other.__hash__() + + def __hash__(self): + return id(self) + + def _gzip_compress(self, body): + buf = io.BytesIO() + with gzip.GzipFile(fileobj=buf, mode="wb") as f: + f.write(body) + return buf.getvalue() + + def _raise_warnings(self, warning_headers): + """If 'headers' contains a 'Warning' header raise + the warnings to be seen by the user. Takes an iterable + of string values from any number of 'Warning' headers. + """ + if not warning_headers: + return + + # Grab only the message from each header, the rest is discarded. + # Format is: '(number) Elasticsearch-(version)-(instance) "(message)"' + warning_messages = [] + for header in warning_headers: + # Because 'Requests' does it's own folding of multiple HTTP headers + # into one header delimited by commas (totally standard compliant, just + # annoying for cases like this) we need to expect there may be + # more than one message per 'Warning' header. + matches = _WARNING_RE.findall(header) + if matches: + warning_messages.extend(matches) + else: + # Don't want to throw away any warnings, even if they + # don't follow the format we have now. Use the whole header. + warning_messages.append(header) + + for message in warning_messages: + warnings.warn( + message, category=ElasticsearchDeprecationWarning, stacklevel=6 + ) + + def _pretty_json(self, data): + # pretty JSON in tracer curl logs + try: + return json.dumps( + json.loads(data), sort_keys=True, indent=2, separators=(",", ": ") + ).replace("'", r"\u0027") + except (ValueError, TypeError): + # non-json data or a bulk request + return data + + def _log_trace(self, method, path, body, status_code, response, duration): + if not tracer.isEnabledFor(logging.INFO) or not tracer.handlers: + return + + # include pretty in trace curls + path = path.replace("?", "?pretty&", 1) if "?" 
in path else path + "?pretty" + if self.url_prefix: + path = path.replace(self.url_prefix, "", 1) + tracer.info( + "curl %s-X%s 'http://localhost:9200%s' -d '%s'", + "-H 'Content-Type: application/json' " if body else "", + method, + path, + self._pretty_json(body) if body else "", + ) + + if tracer.isEnabledFor(logging.DEBUG): + tracer.debug( + "#[%s] (%.3fs)\n#%s", + status_code, + duration, + self._pretty_json(response).replace("\n", "\n#") if response else "", + ) + + def log_request_success( + self, method, full_url, path, body, status_code, response, duration + ): + """ Log a successful API call. """ + # TODO: optionally pass in params instead of full_url and do urlencode only when needed + + # body has already been serialized to utf-8, deserialize it for logging + # TODO: find a better way to avoid (de)encoding the body back and forth + if body: + try: + body = body.decode("utf-8", "ignore") + except AttributeError: + pass + + logger.info( + "%s %s [status:%s request:%.3fs]", method, full_url, status_code, duration + ) + logger.debug("> %s", body) + logger.debug("< %s", response) + + self._log_trace(method, path, body, status_code, response, duration) + + def log_request_fail( + self, + method, + full_url, + path, + body, + duration, + status_code=None, + response=None, + exception=None, + ): + """ Log an unsuccessful API call. """ + # do not log 404s on HEAD requests + if method == "HEAD" and status_code == 404: + return + logger.warning( + "%s %s [status:%s request:%.3fs]", + method, + full_url, + status_code or "N/A", + duration, + exc_info=exception is not None, + ) + + # body has already been serialized to utf-8, deserialize it for logging + # TODO: find a better way to avoid (de)encoding the body back and forth + if body: + try: + body = body.decode("utf-8", "ignore") + except AttributeError: + pass + + logger.debug("> %s", body) + + self._log_trace(method, path, body, status_code, response, duration) + + if response is not None: + logger.debug("< %s", response) + + def _raise_error(self, status_code, raw_data): + """ Locate appropriate exception and raise it. 
""" + error_message = raw_data + additional_info = None + try: + if raw_data: + additional_info = json.loads(raw_data) + error_message = additional_info.get("error", error_message) + if isinstance(error_message, dict) and "type" in error_message: + error_message = error_message["type"] + except (ValueError, TypeError) as err: + logger.warning("Undecodable raw error response from server: %s", err) + + raise HTTP_EXCEPTIONS.get(status_code, TransportError)( + status_code, error_message, additional_info + ) + + def _get_default_user_agent(self): + return "elasticsearch-py/%s (Python %s)" % (__versionstr__, python_version()) + + def _get_api_key_header_val(self, api_key): + """ + Check the type of the passed api_key and return the correct header value + for the `API Key authentication ` + :arg api_key, either a tuple or a base64 encoded string + """ + if isinstance(api_key, (tuple, list)): + s = "{0}:{1}".format(api_key[0], api_key[1]).encode("utf-8") + return "ApiKey " + binascii.b2a_base64(s).rstrip(b"\r\n").decode("utf-8") + return "ApiKey " + api_key diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/http_requests.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/http_requests.py new file mode 100755 index 000000000..4da838517 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/http_requests.py @@ -0,0 +1,201 @@ +import time +import warnings + +try: + import requests + + REQUESTS_AVAILABLE = True +except ImportError: + REQUESTS_AVAILABLE = False + +from .base import Connection +from ..exceptions import ( + ConnectionError, + ImproperlyConfigured, + ConnectionTimeout, + SSLError, +) +from ..compat import urlencode, string_types + + +class RequestsHttpConnection(Connection): + """ + Connection using the `requests` library. + + :arg http_auth: optional http auth information as either ':' separated + string or a tuple. Any value will be passed into requests as `auth`. + :arg use_ssl: use ssl for the connection if `True` + :arg verify_certs: whether to verify SSL certificates + :arg ssl_show_warn: show warning when verify certs is disabled + :arg ca_certs: optional path to CA bundle. By default standard requests' + bundle will be used. + :arg client_cert: path to the file containing the private key and the + certificate, or cert only if using client_key + :arg client_key: path to the file containing the private key if using + separate cert and key files (client_cert will contain only the cert) + :arg headers: any custom http headers to be add to requests + :arg http_compress: Use gzip compression + :arg cloud_id: The Cloud ID from ElasticCloud. Convenient way to connect to cloud instances. + Other host connection params will be ignored. + :arg api_key: optional API Key authentication as either base64 encoded string or a tuple. + :arg opaque_id: Send this value in the 'X-Opaque-Id' HTTP header + For tracing all requests made by this transport. + """ + + def __init__( + self, + host="localhost", + port=None, + http_auth=None, + use_ssl=False, + verify_certs=True, + ssl_show_warn=True, + ca_certs=None, + client_cert=None, + client_key=None, + headers=None, + http_compress=None, + cloud_id=None, + api_key=None, + opaque_id=None, + **kwargs + ): + if not REQUESTS_AVAILABLE: + raise ImproperlyConfigured( + "Please install requests to use RequestsHttpConnection." + ) + + # Initialize Session so .headers works before calling super().__init__(). 
+ self.session = requests.Session() + for key in list(self.session.headers): + self.session.headers.pop(key) + + super(RequestsHttpConnection, self).__init__( + host=host, + port=port, + use_ssl=use_ssl, + headers=headers, + http_compress=http_compress, + cloud_id=cloud_id, + api_key=api_key, + opaque_id=opaque_id, + **kwargs + ) + + if not self.http_compress: + # Need to set this to 'None' otherwise Requests adds its own. + self.session.headers["accept-encoding"] = None + + if http_auth is not None: + if isinstance(http_auth, (tuple, list)): + http_auth = tuple(http_auth) + elif isinstance(http_auth, string_types): + http_auth = tuple(http_auth.split(":", 1)) + self.session.auth = http_auth + + self.base_url = "%s%s" % (self.host, self.url_prefix,) + self.session.verify = verify_certs + if not client_key: + self.session.cert = client_cert + elif client_cert: + # cert is a tuple of (certfile, keyfile) + self.session.cert = (client_cert, client_key) + if ca_certs: + if not verify_certs: + raise ImproperlyConfigured( + "You cannot pass CA certificates when verify SSL is off." + ) + self.session.verify = ca_certs + + if not ssl_show_warn: + requests.packages.urllib3.disable_warnings() + + if self.use_ssl and not verify_certs and ssl_show_warn: + warnings.warn( + "Connecting to %s using SSL with verify_certs=False is insecure." + % self.host + ) + + def perform_request( + self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None + ): + url = self.base_url + url + headers = headers or {} + if params: + url = "%s?%s" % (url, urlencode(params or {})) + + orig_body = body + if self.http_compress and body: + body = self._gzip_compress(body) + headers["content-encoding"] = "gzip" + + start = time.time() + request = requests.Request(method=method, headers=headers, url=url, data=body) + prepared_request = self.session.prepare_request(request) + settings = self.session.merge_environment_settings( + prepared_request.url, {}, None, None, None + ) + send_kwargs = {"timeout": timeout or self.timeout} + send_kwargs.update(settings) + try: + response = self.session.send(prepared_request, **send_kwargs) + duration = time.time() - start + raw_data = response.text + except Exception as e: + self.log_request_fail( + method, + url, + prepared_request.path_url, + body, + time.time() - start, + exception=e, + ) + if isinstance(e, requests.exceptions.SSLError): + raise SSLError("N/A", str(e), e) + if isinstance(e, requests.Timeout): + raise ConnectionTimeout("TIMEOUT", str(e), e) + raise ConnectionError("N/A", str(e), e) + + # raise warnings if any from the 'Warnings' header. 
+ warnings_headers = ( + (response.headers["warning"],) if "warning" in response.headers else () + ) + self._raise_warnings(warnings_headers) + + # raise errors based on http status codes, let the client handle those if needed + if ( + not (200 <= response.status_code < 300) + and response.status_code not in ignore + ): + self.log_request_fail( + method, + url, + response.request.path_url, + orig_body, + duration, + response.status_code, + raw_data, + ) + self._raise_error(response.status_code, raw_data) + + self.log_request_success( + method, + url, + response.request.path_url, + orig_body, + response.status_code, + raw_data, + duration, + ) + + return response.status_code, response.headers, raw_data + + @property + def headers(self): + return self.session.headers + + def close(self): + """ + Explicitly closes connections + """ + self.session.close() diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/http_urllib3.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/http_urllib3.py new file mode 100755 index 000000000..a4c5a268d --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/http_urllib3.py @@ -0,0 +1,264 @@ +import time +import ssl +import urllib3 +from urllib3.exceptions import ReadTimeoutError, SSLError as UrllibSSLError +from urllib3.util.retry import Retry +import warnings + +from .base import Connection +from ..exceptions import ( + ConnectionError, + ImproperlyConfigured, + ConnectionTimeout, + SSLError, +) +from ..compat import urlencode + +# sentinel value for `verify_certs` and `ssl_show_warn`. +# This is used to detect if a user is passing in a value +# for SSL kwargs if also using an SSLContext. +VERIFY_CERTS_DEFAULT = object() +SSL_SHOW_WARN_DEFAULT = object() + +CA_CERTS = None + +try: + import certifi + + CA_CERTS = certifi.where() +except ImportError: + pass + + +def create_ssl_context(**kwargs): + """ + A helper function around creating an SSL context + + https://docs.python.org/3/library/ssl.html#context-creation + + Accepts kwargs in the same manner as `create_default_context`. + """ + ctx = ssl.create_default_context(**kwargs) + return ctx + + +class Urllib3HttpConnection(Connection): + """ + Default connection class using the `urllib3` library and the http protocol. + + :arg host: hostname of the node (default: localhost) + :arg port: port to use (integer, default: 9200) + :arg url_prefix: optional url prefix for elasticsearch + :arg timeout: default timeout in seconds (float, default: 10) + :arg http_auth: optional http auth information as either ':' separated + string or a tuple + :arg use_ssl: use ssl for the connection if `True` + :arg verify_certs: whether to verify SSL certificates + :arg ssl_show_warn: show warning when verify certs is disabled + :arg ca_certs: optional path to CA bundle. + See https://urllib3.readthedocs.io/en/latest/security.html#using-certifi-with-urllib3 + for instructions how to get default set + :arg client_cert: path to the file containing the private key and the + certificate, or cert only if using client_key + :arg client_key: path to the file containing the private key if using + separate cert and key files (client_cert will contain only the cert) + :arg ssl_version: version of the SSL protocol to use. Choices are: + SSLv23 (default) SSLv2 SSLv3 TLSv1 (see ``PROTOCOL_*`` constants in the + ``ssl`` module for exact options for your environment). 
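The two connection classes are interchangeable behind the transport; which one handles HTTP is a constructor choice on the top-level client. A sketch of selecting the requests-based class with certificate verification; the host, credentials, and CA path are hypothetical:

```python
from elasticsearch import Elasticsearch, RequestsHttpConnection

# Urllib3HttpConnection is the default; RequestsHttpConnection is opt-in.
es = Elasticsearch(
    ["https://es.internal.example:9243"],       # hypothetical host
    connection_class=RequestsHttpConnection,
    http_auth=("elastic", "changeme"),
    verify_certs=True,
    ca_certs="/etc/ssl/certs/internal-ca.pem",  # hypothetical CA bundle
)
print(es.info())
```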
+ :arg ssl_assert_hostname: use hostname verification if not `False` + :arg ssl_assert_fingerprint: verify the supplied certificate fingerprint if not `None` + :arg maxsize: the number of connections which will be kept open to this + host. See https://urllib3.readthedocs.io/en/1.4/pools.html#api for more + information. + :arg headers: any custom http headers to be add to requests + :arg http_compress: Use gzip compression + :arg cloud_id: The Cloud ID from ElasticCloud. Convenient way to connect to cloud instances. + Other host connection params will be ignored. + :arg api_key: optional API Key authentication as either base64 encoded string or a tuple. + :arg opaque_id: Send this value in the 'X-Opaque-Id' HTTP header + For tracing all requests made by this transport. + """ + + def __init__( + self, + host="localhost", + port=None, + http_auth=None, + use_ssl=False, + verify_certs=VERIFY_CERTS_DEFAULT, + ssl_show_warn=SSL_SHOW_WARN_DEFAULT, + ca_certs=None, + client_cert=None, + client_key=None, + ssl_version=None, + ssl_assert_hostname=None, + ssl_assert_fingerprint=None, + maxsize=10, + headers=None, + ssl_context=None, + http_compress=None, + cloud_id=None, + api_key=None, + opaque_id=None, + **kwargs + ): + # Initialize headers before calling super().__init__(). + self.headers = urllib3.make_headers(keep_alive=True) + + super(Urllib3HttpConnection, self).__init__( + host=host, + port=port, + use_ssl=use_ssl, + headers=headers, + http_compress=http_compress, + cloud_id=cloud_id, + api_key=api_key, + opaque_id=opaque_id, + **kwargs + ) + if http_auth is not None: + if isinstance(http_auth, (tuple, list)): + http_auth = ":".join(http_auth) + self.headers.update(urllib3.make_headers(basic_auth=http_auth)) + + pool_class = urllib3.HTTPConnectionPool + kw = {} + + # if providing an SSL context, raise error if any other SSL related flag is used + if ssl_context and ( + (verify_certs is not VERIFY_CERTS_DEFAULT) + or (ssl_show_warn is not SSL_SHOW_WARN_DEFAULT) + or ca_certs + or client_cert + or client_key + or ssl_version + ): + warnings.warn( + "When using `ssl_context`, all other SSL related kwargs are ignored" + ) + + # if ssl_context provided use SSL by default + if ssl_context and self.use_ssl: + pool_class = urllib3.HTTPSConnectionPool + kw.update( + { + "assert_fingerprint": ssl_assert_fingerprint, + "ssl_context": ssl_context, + } + ) + + elif self.use_ssl: + pool_class = urllib3.HTTPSConnectionPool + kw.update( + { + "ssl_version": ssl_version, + "assert_hostname": ssl_assert_hostname, + "assert_fingerprint": ssl_assert_fingerprint, + } + ) + + # Convert all sentinel values to their actual default + # values if not using an SSLContext. + if verify_certs is VERIFY_CERTS_DEFAULT: + verify_certs = True + if ssl_show_warn is SSL_SHOW_WARN_DEFAULT: + ssl_show_warn = True + + ca_certs = CA_CERTS if ca_certs is None else ca_certs + if verify_certs: + if not ca_certs: + raise ImproperlyConfigured( + "Root certificates are missing for certificate " + "validation. Either pass them in using the ca_certs parameter or " + "install certifi to use it automatically." + ) + + kw.update( + { + "cert_reqs": "CERT_REQUIRED", + "ca_certs": ca_certs, + "cert_file": client_cert, + "key_file": client_key, + } + ) + else: + kw["cert_reqs"] = "CERT_NONE" + if ssl_show_warn: + warnings.warn( + "Connecting to %s using SSL with verify_certs=False is insecure." 
+ % self.host + ) + if not ssl_show_warn: + urllib3.disable_warnings() + + self.pool = pool_class( + self.hostname, port=self.port, timeout=self.timeout, maxsize=maxsize, **kw + ) + + def perform_request( + self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None + ): + url = self.url_prefix + url + if params: + url = "%s?%s" % (url, urlencode(params)) + full_url = self.host + url + + start = time.time() + orig_body = body + try: + kw = {} + if timeout: + kw["timeout"] = timeout + + # in python2 we need to make sure the url and method are not + # unicode. Otherwise the body will be decoded into unicode too and + # that will fail (#133, #201). + if not isinstance(url, str): + url = url.encode("utf-8") + if not isinstance(method, str): + method = method.encode("utf-8") + + request_headers = self.headers.copy() + request_headers.update(headers or ()) + + if self.http_compress and body: + body = self._gzip_compress(body) + request_headers["content-encoding"] = "gzip" + + response = self.pool.urlopen( + method, url, body, retries=Retry(False), headers=request_headers, **kw + ) + duration = time.time() - start + raw_data = response.data.decode("utf-8") + except Exception as e: + self.log_request_fail( + method, full_url, url, orig_body, time.time() - start, exception=e + ) + if isinstance(e, UrllibSSLError): + raise SSLError("N/A", str(e), e) + if isinstance(e, ReadTimeoutError): + raise ConnectionTimeout("TIMEOUT", str(e), e) + raise ConnectionError("N/A", str(e), e) + + # raise warnings if any from the 'Warnings' header. + warning_headers = response.headers.get_all("warning", ()) + self._raise_warnings(warning_headers) + + # raise errors based on http status codes, let the client handle those if needed + if not (200 <= response.status < 300) and response.status not in ignore: + self.log_request_fail( + method, full_url, url, orig_body, duration, response.status, raw_data + ) + self._raise_error(response.status, raw_data) + + self.log_request_success( + method, full_url, url, orig_body, response.status, raw_data, duration + ) + + return response.status, response.getheaders(), raw_data + + def close(self): + """ + Explicitly closes connection + """ + self.pool.close() diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/pooling.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/pooling.py new file mode 100755 index 000000000..dd5431e15 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection/pooling.py @@ -0,0 +1,33 @@ +try: + import queue +except ImportError: + import Queue as queue +from .base import Connection + + +class PoolingConnection(Connection): + """ + Base connection class for connections that use libraries without thread + safety and no capacity for connection pooling. To use this just implement a + ``_make_connection`` method that constructs a new connection and returns + it. 
+ """ + + def __init__(self, *args, **kwargs): + self._free_connections = queue.Queue() + super(PoolingConnection, self).__init__(*args, **kwargs) + + def _get_connection(self): + try: + return self._free_connections.get_nowait() + except queue.Empty: + return self._make_connection() + + def _release_connection(self, con): + self._free_connections.put(con) + + def close(self): + """ + Explicitly close connection + """ + pass diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/connection_pool.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection_pool.py new file mode 100755 index 000000000..6881707c2 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/connection_pool.py @@ -0,0 +1,282 @@ +import time +import random +import logging +import threading + +try: + from Queue import PriorityQueue, Empty +except ImportError: + from queue import PriorityQueue, Empty + +from .exceptions import ImproperlyConfigured + +logger = logging.getLogger("elasticsearch") + + +class ConnectionSelector(object): + """ + Simple class used to select a connection from a list of currently live + connection instances. In init time it is passed a dictionary containing all + the connections' options which it can then use during the selection + process. When the `select` method is called it is given a list of + *currently* live connections to choose from. + + The options dictionary is the one that has been passed to + :class:`~elasticsearch.Transport` as `hosts` param and the same that is + used to construct the Connection object itself. When the Connection was + created from information retrieved from the cluster via the sniffing + process it will be the dictionary returned by the `host_info_callback`. + + Example of where this would be useful is a zone-aware selector that would + only select connections from it's own zones and only fall back to other + connections where there would be none in it's zones. + """ + + def __init__(self, opts): + """ + :arg opts: dictionary of connection instances and their options + """ + self.connection_opts = opts + + def select(self, connections): + """ + Select a connection from the given list. + + :arg connections: list of live connections to choose from + """ + pass + + +class RandomSelector(ConnectionSelector): + """ + Select a connection at random + """ + + def select(self, connections): + return random.choice(connections) + + +class RoundRobinSelector(ConnectionSelector): + """ + Selector using round-robin. + """ + + def __init__(self, opts): + super(RoundRobinSelector, self).__init__(opts) + self.data = threading.local() + + def select(self, connections): + self.data.rr = getattr(self.data, "rr", -1) + 1 + self.data.rr %= len(connections) + return connections[self.data.rr] + + +class ConnectionPool(object): + """ + Container holding the :class:`~elasticsearch.Connection` instances, + managing the selection process (via a + :class:`~elasticsearch.ConnectionSelector`) and dead connections. + + It's only interactions are with the :class:`~elasticsearch.Transport` class + that drives all the actions within `ConnectionPool`. + + Initially connections are stored on the class as a list and, along with the + connection options, get passed to the `ConnectionSelector` instance for + future reference. + + Upon each request the `Transport` will ask for a `Connection` via the + `get_connection` method. 
If the connection fails (it's `perform_request` + raises a `ConnectionError`) it will be marked as dead (via `mark_dead`) and + put on a timeout (if it fails N times in a row the timeout is exponentially + longer - the formula is `default_timeout * 2 ** (fail_count - 1)`). When + the timeout is over the connection will be resurrected and returned to the + live pool. A connection that has been previously marked as dead and + succeeds will be marked as live (its fail count will be deleted). + """ + + def __init__( + self, + connections, + dead_timeout=60, + timeout_cutoff=5, + selector_class=RoundRobinSelector, + randomize_hosts=True, + **kwargs + ): + """ + :arg connections: list of tuples containing the + :class:`~elasticsearch.Connection` instance and it's options + :arg dead_timeout: number of seconds a connection should be retired for + after a failure, increases on consecutive failures + :arg timeout_cutoff: number of consecutive failures after which the + timeout doesn't increase + :arg selector_class: :class:`~elasticsearch.ConnectionSelector` + subclass to use if more than one connection is live + :arg randomize_hosts: shuffle the list of connections upon arrival to + avoid dog piling effect across processes + """ + if not connections: + raise ImproperlyConfigured( + "No defined connections, you need to " "specify at least one host." + ) + self.connection_opts = connections + self.connections = [c for (c, opts) in connections] + # remember original connection list for resurrect(force=True) + self.orig_connections = tuple(self.connections) + # PriorityQueue for thread safety and ease of timeout management + self.dead = PriorityQueue(len(self.connections)) + self.dead_count = {} + + if randomize_hosts: + # randomize the connection list to avoid all clients hitting same node + # after startup/restart + random.shuffle(self.connections) + + # default timeout after which to try resurrecting a connection + self.dead_timeout = dead_timeout + self.timeout_cutoff = timeout_cutoff + + self.selector = selector_class(dict(connections)) + + def mark_dead(self, connection, now=None): + """ + Mark the connection as dead (failed). Remove it from the live pool and + put it on a timeout. + + :arg connection: the failed instance + """ + # allow inject for testing purposes + now = now if now else time.time() + try: + self.connections.remove(connection) + except ValueError: + logger.info( + "Attempted to remove %r, but it does not exist in the connection pool.", + connection, + ) + # connection not alive or another thread marked it already, ignore + return + else: + dead_count = self.dead_count.get(connection, 0) + 1 + self.dead_count[connection] = dead_count + timeout = self.dead_timeout * 2 ** min(dead_count - 1, self.timeout_cutoff) + self.dead.put((now + timeout, connection)) + logger.warning( + "Connection %r has failed for %i times in a row, putting on %i second timeout.", + connection, + dead_count, + timeout, + ) + + def mark_live(self, connection): + """ + Mark connection as healthy after a resurrection. Resets the fail + counter for the connection. + + :arg connection: the connection to redeem + """ + try: + del self.dead_count[connection] + except KeyError: + # race condition, safe to ignore + pass + + def resurrect(self, force=False): + """ + Attempt to resurrect a connection from the dead pool. It will try to + locate one (not all) eligible (it's timeout is over) connection to + return to the live pool. Any resurrected connection is also returned. 
+ + :arg force: resurrect a connection even if there is none eligible (used + when we have no live connections). If force is specified resurrect + always returns a connection. + + """ + # no dead connections + if self.dead.empty(): + # we are forced to return a connection, take one from the original + # list. This is to avoid a race condition where get_connection can + # see no live connections but when it calls resurrect self.dead is + # also empty. We assume that other threat has resurrected all + # available connections so we can safely return one at random. + if force: + return random.choice(self.orig_connections) + return + + try: + # retrieve a connection to check + timeout, connection = self.dead.get(block=False) + except Empty: + # other thread has been faster and the queue is now empty. If we + # are forced, return a connection at random again. + if force: + return random.choice(self.orig_connections) + return + + if not force and timeout > time.time(): + # return it back if not eligible and not forced + self.dead.put((timeout, connection)) + return + + # either we were forced or the connection is elligible to be retried + self.connections.append(connection) + logger.info("Resurrecting connection %r (force=%s).", connection, force) + return connection + + def get_connection(self): + """ + Return a connection from the pool using the `ConnectionSelector` + instance. + + It tries to resurrect eligible connections, forces a resurrection when + no connections are availible and passes the list of live connections to + the selector instance to choose from. + + Returns a connection instance and it's current fail count. + """ + self.resurrect() + connections = self.connections[:] + + # no live nodes, resurrect one by force and return it + if not connections: + return self.resurrect(True) + + # only call selector if we have a selection + if len(connections) > 1: + return self.selector.select(connections) + + # only one connection, no need for a selector + return connections[0] + + def close(self): + """ + Explicitly closes connections + """ + for conn in self.orig_connections: + conn.close() + + +class DummyConnectionPool(ConnectionPool): + def __init__(self, connections, **kwargs): + if len(connections) != 1: + raise ImproperlyConfigured( + "DummyConnectionPool needs exactly one " "connection defined." + ) + # we need connection opts for sniffing logic + self.connection_opts = connections + self.connection = connections[0][0] + self.connections = (self.connection,) + + def get_connection(self): + return self.connection + + def close(self): + """ + Explicitly closes connections + """ + self.connection.close() + + def _noop(self, *args, **kwargs): + pass + + mark_dead = mark_live = resurrect = _noop diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/exceptions.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/exceptions.py new file mode 100755 index 000000000..f3d48ce92 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/exceptions.py @@ -0,0 +1,152 @@ +__all__ = [ + "ImproperlyConfigured", + "ElasticsearchException", + "SerializationError", + "TransportError", + "NotFoundError", + "ConflictError", + "RequestError", + "ConnectionError", + "SSLError", + "ConnectionTimeout", + "AuthenticationException", + "AuthorizationException", +] + + +class ImproperlyConfigured(Exception): + """ + Exception raised when the config passed to the client is inconsistent or invalid. 
+ """ + + +class ElasticsearchException(Exception): + """ + Base class for all exceptions raised by this package's operations (doesn't + apply to :class:`~elasticsearch.ImproperlyConfigured`). + """ + + +class SerializationError(ElasticsearchException): + """ + Data passed in failed to serialize properly in the ``Serializer`` being + used. + """ + + +class TransportError(ElasticsearchException): + """ + Exception raised when ES returns a non-OK (>=400) HTTP status code. Or when + an actual connection error happens; in that case the ``status_code`` will + be set to ``'N/A'``. + """ + + @property + def status_code(self): + """ + The HTTP status code of the response that precipitated the error or + ``'N/A'`` if not applicable. + """ + return self.args[0] + + @property + def error(self): + """ A string error message. """ + return self.args[1] + + @property + def info(self): + """ + Dict of returned error info from ES, where available, underlying + exception when not. + """ + return self.args[2] + + def __str__(self): + cause = "" + try: + if self.info and "error" in self.info: + if isinstance(self.info["error"], dict): + root_cause = self.info["error"]["root_cause"][0] + cause = ", ".join( + filter( + None, + [ + repr(root_cause["reason"]), + root_cause.get("resource.id"), + root_cause.get("resource.type"), + ], + ) + ) + + else: + cause = repr(self.info["error"]) + except LookupError: + pass + msg = ", ".join(filter(None, [str(self.status_code), repr(self.error), cause])) + return "%s(%s)" % (self.__class__.__name__, msg) + + +class ConnectionError(TransportError): + """ + Error raised when there was an exception while talking to ES. Original + exception from the underlying :class:`~elasticsearch.Connection` + implementation is available as ``.info``. + """ + + def __str__(self): + return "ConnectionError(%s) caused by: %s(%s)" % ( + self.error, + self.info.__class__.__name__, + self.info, + ) + + +class SSLError(ConnectionError): + """ Error raised when encountering SSL errors. """ + + +class ConnectionTimeout(ConnectionError): + """ A network timeout. Doesn't cause a node retry by default. """ + + def __str__(self): + return "ConnectionTimeout caused by - %s(%s)" % ( + self.info.__class__.__name__, + self.info, + ) + + +class NotFoundError(TransportError): + """ Exception representing a 404 status code. """ + + +class ConflictError(TransportError): + """ Exception representing a 409 status code. """ + + +class RequestError(TransportError): + """ Exception representing a 400 status code. """ + + +class AuthenticationException(TransportError): + """ Exception representing a 401 status code. """ + + +class AuthorizationException(TransportError): + """ Exception representing a 403 status code. """ + + +class ElasticsearchDeprecationWarning(Warning): + """ Warning that is raised when a deprecated option + is flagged via the 'Warning' HTTP header. 
+ """ + + +# more generic mappings from status_code to python exceptions +HTTP_EXCEPTIONS = { + 400: RequestError, + 401: AuthenticationException, + 403: AuthorizationException, + 404: NotFoundError, + 409: ConflictError, +} diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/__init__.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/__init__.py new file mode 100755 index 000000000..28a11c303 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/__init__.py @@ -0,0 +1,17 @@ +from .errors import BulkIndexError, ScanError +from .actions import expand_action, streaming_bulk, bulk, parallel_bulk +from .actions import scan, reindex +from .actions import _chunk_actions, _process_bulk_chunk + +__all__ = [ + "BulkIndexError", + "ScanError", + "expand_action", + "streaming_bulk", + "bulk", + "parallel_bulk", + "scan", + "reindex", + "_chunk_actions", + "_process_bulk_chunk", +] diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/actions.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/actions.py new file mode 100755 index 000000000..af409c37f --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/actions.py @@ -0,0 +1,543 @@ +from operator import methodcaller +import time + +from ..exceptions import TransportError +from ..compat import map, string_types, Queue + +from .errors import ScanError, BulkIndexError + +import logging + + +logger = logging.getLogger("elasticsearch.helpers") + + +def expand_action(data): + """ + From one document or action definition passed in by the user extract the + action/data lines needed for elasticsearch's + :meth:`~elasticsearch.Elasticsearch.bulk` api. + """ + # when given a string, assume user wants to index raw json + if isinstance(data, string_types): + return '{"index":{}}', data + + # make sure we don't alter the action + data = data.copy() + op_type = data.pop("_op_type", "index") + action = {op_type: {}} + for key in ( + "_id", + "_index", + "_parent", + "_percolate", + "_retry_on_conflict", + "_routing", + "_timestamp", + "_type", + "_version", + "_version_type", + "parent", + "pipeline", + "retry_on_conflict", + "routing", + "version", + "version_type", + ): + if key in data: + if key in [ + "_parent", + "_retry_on_conflict", + "_routing", + "_version", + "_version_type", + ]: + action[op_type][key[1:]] = data.pop(key) + else: + action[op_type][key] = data.pop(key) + + # no data payload for delete + if op_type == "delete": + return action, None + + return action, data.get("_source", data) + + +def _chunk_actions(actions, chunk_size, max_chunk_bytes, serializer): + """ + Split actions into chunks by number or size, serialize them into strings in + the process. 
+ """ + bulk_actions, bulk_data = [], [] + size, action_count = 0, 0 + for action, data in actions: + raw_data, raw_action = data, action + action = serializer.dumps(action) + # +1 to account for the trailing new line character + cur_size = len(action.encode("utf-8")) + 1 + + if data is not None: + data = serializer.dumps(data) + cur_size += len(data.encode("utf-8")) + 1 + + # full chunk, send it and start a new one + if bulk_actions and ( + size + cur_size > max_chunk_bytes or action_count == chunk_size + ): + yield bulk_data, bulk_actions + bulk_actions, bulk_data = [], [] + size, action_count = 0, 0 + + bulk_actions.append(action) + if data is not None: + bulk_actions.append(data) + bulk_data.append((raw_action, raw_data)) + else: + bulk_data.append((raw_action,)) + + size += cur_size + action_count += 1 + + if bulk_actions: + yield bulk_data, bulk_actions + + +def _process_bulk_chunk( + client, + bulk_actions, + bulk_data, + raise_on_exception=True, + raise_on_error=True, + *args, + **kwargs +): + """ + Send a bulk request to elasticsearch and process the output. + """ + # if raise on error is set, we need to collect errors per chunk before raising them + errors = [] + + try: + # send the actual request + resp = client.bulk("\n".join(bulk_actions) + "\n", *args, **kwargs) + except TransportError as e: + # default behavior - just propagate exception + if raise_on_exception: + raise e + + # if we are not propagating, mark all actions in current chunk as failed + err_message = str(e) + exc_errors = [] + + for data in bulk_data: + # collect all the information about failed actions + op_type, action = data[0].copy().popitem() + info = {"error": err_message, "status": e.status_code, "exception": e} + if op_type != "delete": + info["data"] = data[1] + info.update(action) + exc_errors.append({op_type: info}) + + # emulate standard behavior for failed actions + if raise_on_error: + raise BulkIndexError( + "%i document(s) failed to index." % len(exc_errors), exc_errors + ) + else: + for err in exc_errors: + yield False, err + return + + # go through request-response pairs and detect failures + for data, (op_type, item) in zip( + bulk_data, map(methodcaller("popitem"), resp["items"]) + ): + ok = 200 <= item.get("status", 500) < 300 + if not ok and raise_on_error: + # include original document source + if len(data) > 1: + item["data"] = data[1] + errors.append({op_type: item}) + + if ok or not errors: + # if we are not just recording all errors to be able to raise + # them all at once, yield items individually + yield ok, {op_type: item} + + if errors: + raise BulkIndexError("%i document(s) failed to index." % len(errors), errors) + + +def streaming_bulk( + client, + actions, + chunk_size=500, + max_chunk_bytes=100 * 1024 * 1024, + raise_on_error=True, + expand_action_callback=expand_action, + raise_on_exception=True, + max_retries=0, + initial_backoff=2, + max_backoff=600, + yield_ok=True, + *args, + **kwargs +): + + """ + Streaming bulk consumes actions from the iterable passed in and yields + results per action. For non-streaming usecases use + :func:`~elasticsearch.helpers.bulk` which is a wrapper around streaming + bulk that returns summary information about the bulk operation once the + entire input is consumed and sent. + + If you specify ``max_retries`` it will also retry any documents that were + rejected with a ``429`` status code. 
To do this it will wait (**by calling + time.sleep which will block**) for ``initial_backoff`` seconds and then, + every subsequent rejection for the same chunk, for double the time every + time up to ``max_backoff`` seconds. + + :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use + :arg actions: iterable containing the actions to be executed + :arg chunk_size: number of docs in one chunk sent to es (default: 500) + :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB) + :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`) + from the execution of the last chunk when some occur. By default we raise. + :arg raise_on_exception: if ``False`` then don't propagate exceptions from + call to ``bulk`` and just report the items that failed as failed. + :arg expand_action_callback: callback executed on each action passed in, + should return a tuple containing the action line and the data line + (`None` if data line should be omitted). + :arg max_retries: maximum number of times a document will be retried when + ``429`` is received, set to 0 (default) for no retries on ``429`` + :arg initial_backoff: number of seconds we should wait before the first + retry. Any subsequent retries will be powers of ``initial_backoff * + 2**retry_number`` + :arg max_backoff: maximum number of seconds a retry will wait + :arg yield_ok: if set to False will skip successful documents in the output + """ + actions = map(expand_action_callback, actions) + + for bulk_data, bulk_actions in _chunk_actions( + actions, chunk_size, max_chunk_bytes, client.transport.serializer + ): + + for attempt in range(max_retries + 1): + to_retry, to_retry_data = [], [] + if attempt: + time.sleep(min(max_backoff, initial_backoff * 2 ** (attempt - 1))) + + try: + for data, (ok, info) in zip( + bulk_data, + _process_bulk_chunk( + client, + bulk_actions, + bulk_data, + raise_on_exception, + raise_on_error, + *args, + **kwargs + ), + ): + + if not ok: + action, info = info.popitem() + # retry if retries enabled, we get 429, and we are not + # in the last attempt + if ( + max_retries + and info["status"] == 429 + and (attempt + 1) <= max_retries + ): + # _process_bulk_chunk expects strings so we need to + # re-serialize the data + to_retry.extend( + map(client.transport.serializer.dumps, data) + ) + to_retry_data.append(data) + else: + yield ok, {action: info} + elif yield_ok: + yield ok, info + + except TransportError as e: + # suppress 429 errors since we will retry them + if attempt == max_retries or e.status_code != 429: + raise + else: + if not to_retry: + break + # retry only subset of documents that didn't succeed + bulk_actions, bulk_data = to_retry, to_retry_data + + +def bulk(client, actions, stats_only=False, *args, **kwargs): + """ + Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides + a more human friendly interface - it consumes an iterator of actions and + sends them to elasticsearch in chunks. It returns a tuple with summary + information - number of successfully executed actions and either list of + errors or number of errors if ``stats_only`` is set to ``True``. Note that + by default we raise a ``BulkIndexError`` when we encounter an error so + options like ``stats_only`` only apply when ``raise_on_error`` is set to + ``False``. + + When errors are being collected original document data is included in the + error dictionary which can lead to an extra high memory usage. 
If you need
+    to process a lot of data and want to ignore/collect errors please consider
+    using the :func:`~elasticsearch.helpers.streaming_bulk` helper which will
+    just return the errors and not store them in memory.
+
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg actions: iterator containing the actions
+    :arg stats_only: if `True` only report the number of successful/failed
+        operations instead of the number of successful operations and a list
+        of error responses
+
+    Any additional keyword arguments will be passed to
+    :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute
+    the operation, see :func:`~elasticsearch.helpers.streaming_bulk` for more
+    accepted parameters.
+    """
+    success, failed = 0, 0
+
+    # list of errors to be collected if not stats_only
+    errors = []
+
+    # make streaming_bulk yield successful results so we can count them
+    kwargs["yield_ok"] = True
+    for ok, item in streaming_bulk(client, actions, *args, **kwargs):
+        # go through request-response pairs and detect failures
+        if not ok:
+            if not stats_only:
+                errors.append(item)
+            failed += 1
+        else:
+            success += 1
+
+    return success, failed if stats_only else errors
+
+
+def parallel_bulk(
+    client,
+    actions,
+    thread_count=4,
+    chunk_size=500,
+    max_chunk_bytes=100 * 1024 * 1024,
+    queue_size=4,
+    expand_action_callback=expand_action,
+    *args,
+    **kwargs
+):
+    """
+    Parallel version of the bulk helper run in multiple threads at once.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg actions: iterator containing the actions
+    :arg thread_count: size of the threadpool to use for the bulk requests
+    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
+    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
+    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
+        from the execution of the last chunk when some occur. By default we raise.
+    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
+        call to ``bulk`` and just report the items that failed as failed.
+    :arg expand_action_callback: callback executed on each action passed in,
+        should return a tuple containing the action line and the data line
+        (`None` if data line should be omitted).
+    :arg queue_size: size of the task queue between the main thread (producing
+        chunks to send) and the processing threads.
+    """
+    # Avoid importing multiprocessing unless parallel_bulk is used
+    # to avoid exceptions on restricted environments like App Engine
+    from multiprocessing.pool import ThreadPool
+
+    actions = map(expand_action_callback, actions)
+
+    class BlockingPool(ThreadPool):
+        def _setup_queues(self):
+            super(BlockingPool, self)._setup_queues()
+            # The queue must be at least the size of the number of threads to
+            # prevent hanging when inserting sentinel values during teardown.
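+            # (pool teardown enqueues one sentinel per worker thread; a smaller
+            # bound could block that hand-off and hang close()/join())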
+            self._inqueue = Queue(max(queue_size, thread_count))
+            self._quick_put = self._inqueue.put
+
+    pool = BlockingPool(thread_count)
+
+    try:
+        for result in pool.imap(
+            lambda bulk_chunk: list(
+                _process_bulk_chunk(
+                    client, bulk_chunk[1], bulk_chunk[0], *args, **kwargs
+                )
+            ),
+            _chunk_actions(
+                actions, chunk_size, max_chunk_bytes, client.transport.serializer
+            ),
+        ):
+            for item in result:
+                yield item
+
+    finally:
+        pool.close()
+        pool.join()
+
+
+def scan(
+    client,
+    query=None,
+    scroll="5m",
+    raise_on_error=True,
+    preserve_order=False,
+    size=1000,
+    request_timeout=None,
+    clear_scroll=True,
+    scroll_kwargs=None,
+    **kwargs
+):
+    """
+    Simple abstraction on top of the
+    :meth:`~elasticsearch.Elasticsearch.scroll` api - a simple iterator that
+    yields all hits as returned by underlying scroll requests.
+
+    By default scan does not return results in any pre-determined order. To
+    have a standard order in the returned documents (either by score or
+    explicit sort definition) when scrolling, use ``preserve_order=True``. This
+    may be an expensive operation and will negate the performance benefits of
+    using ``scan``.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
+    :arg scroll: Specify how long a consistent view of the index should be
+        maintained for scrolled search
+    :arg raise_on_error: raises an exception (``ScanError``) if an error is
+        encountered (some shards fail to execute). By default we raise.
+    :arg preserve_order: don't set the ``search_type`` to ``scan`` - this will
+        cause the scroll to paginate with preserving the order. Note that this
+        can be an extremely expensive operation and can easily lead to
+        unpredictable results, use with caution.
+    :arg size: size (per shard) of the batch sent at each iteration.
+    :arg request_timeout: explicit timeout for each call to ``scan``
+    :arg clear_scroll: explicitly calls delete on the scroll id via the clear
+        scroll API at the end of the method on completion or error, defaults
+        to true.
+    :arg scroll_kwargs: additional kwargs to be passed to
+        :meth:`~elasticsearch.Elasticsearch.scroll`
+
+    Any additional keyword arguments will be passed to the initial
+    :meth:`~elasticsearch.Elasticsearch.search` call::
+
+        scan(es,
+            query={"query": {"match": {"title": "python"}}},
+            index="orders-*",
+            doc_type="books"
+        )
+
+    """
+    scroll_kwargs = scroll_kwargs or {}
+
+    if not preserve_order:
+        query = query.copy() if query else {}
+        query["sort"] = "_doc"
+
+    # initial search
+    resp = client.search(
+        body=query, scroll=scroll, size=size, request_timeout=request_timeout, **kwargs
+    )
+    scroll_id = resp.get("_scroll_id")
+
+    try:
+        while scroll_id and resp["hits"]["hits"]:
+            for hit in resp["hits"]["hits"]:
+                yield hit
+
+            # check if we have any errors
+            if (resp["_shards"]["successful"] + resp["_shards"]["skipped"]) < resp[
+                "_shards"
+            ]["total"]:
+                logger.warning(
+                    "Scroll request has only succeeded on %d (+%d skipped) shards out of %d.",
+                    resp["_shards"]["successful"],
+                    resp["_shards"]["skipped"],
+                    resp["_shards"]["total"],
+                )
+                if raise_on_error:
+                    raise ScanError(
+                        scroll_id,
+                        "Scroll request has only succeeded on %d (+%d skipped) shards out of %d."
+ % ( + resp["_shards"]["successful"], + resp["_shards"]["skipped"], + resp["_shards"]["total"], + ), + ) + resp = client.scroll( + body={"scroll_id": scroll_id, "scroll": scroll}, **scroll_kwargs + ) + scroll_id = resp.get("_scroll_id") + + finally: + if scroll_id and clear_scroll: + client.clear_scroll(body={"scroll_id": [scroll_id]}, ignore=(404,)) + + +def reindex( + client, + source_index, + target_index, + query=None, + target_client=None, + chunk_size=500, + scroll="5m", + scan_kwargs={}, + bulk_kwargs={}, +): + + """ + Reindex all documents from one index that satisfy a given query + to another, potentially (if `target_client` is specified) on a different cluster. + If you don't specify the query you will reindex all the documents. + + Since ``2.3`` a :meth:`~elasticsearch.Elasticsearch.reindex` api is + available as part of elasticsearch itself. It is recommended to use the api + instead of this helper wherever possible. The helper is here mostly for + backwards compatibility and for situations where more flexibility is + needed. + + .. note:: + + This helper doesn't transfer mappings, just the data. + + :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use (for + read if `target_client` is specified as well) + :arg source_index: index (or list of indices) to read documents from + :arg target_index: name of the index in the target cluster to populate + :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api + :arg target_client: optional, is specified will be used for writing (thus + enabling reindex between clusters) + :arg chunk_size: number of docs in one chunk sent to es (default: 500) + :arg scroll: Specify how long a consistent view of the index should be + maintained for scrolled search + :arg scan_kwargs: additional kwargs to be passed to + :func:`~elasticsearch.helpers.scan` + :arg bulk_kwargs: additional kwargs to be passed to + :func:`~elasticsearch.helpers.bulk` + """ + target_client = client if target_client is None else target_client + docs = scan(client, query=query, index=source_index, scroll=scroll, **scan_kwargs) + + def _change_doc_index(hits, index): + for h in hits: + h["_index"] = index + if "fields" in h: + h.update(h.pop("fields")) + yield h + + kwargs = {"stats_only": True} + kwargs.update(bulk_kwargs) + return bulk( + target_client, + _change_doc_index(docs, target_index), + chunk_size=chunk_size, + **kwargs + ) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/errors.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/errors.py new file mode 100755 index 000000000..6261822e5 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/errors.py @@ -0,0 +1,14 @@ +from ..exceptions import ElasticsearchException + + +class BulkIndexError(ElasticsearchException): + @property + def errors(self): + """ List of errors from execution of the last chunk. 
""" + return self.args[1] + + +class ScanError(ElasticsearchException): + def __init__(self, scroll_id, *args, **kwargs): + super(ScanError, self).__init__(*args, **kwargs) + self.scroll_id = scroll_id diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/test.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/test.py new file mode 100755 index 000000000..aa4b12111 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/helpers/test.py @@ -0,0 +1,70 @@ +import time +import os +from unittest import TestCase, SkipTest + +from elasticsearch import Elasticsearch +from elasticsearch.exceptions import ConnectionError + + +def get_test_client(nowait=False, **kwargs): + # construct kwargs from the environment + kw = {"timeout": 30, "ca_certs": ".ci/certs/ca.pem"} + + if "PYTHON_CONNECTION_CLASS" in os.environ: + from elasticsearch import connection + + kw["connection_class"] = getattr( + connection, os.environ["PYTHON_CONNECTION_CLASS"] + ) + + kw.update(kwargs) + client = Elasticsearch([os.environ.get("ELASTICSEARCH_HOST", {})], **kw) + + # wait for yellow status + for _ in range(1 if nowait else 100): + try: + client.cluster.health(wait_for_status="yellow") + return client + except ConnectionError: + time.sleep(0.1) + else: + # timeout + raise SkipTest("Elasticsearch failed to start.") + + +def _get_version(version_string): + if "." not in version_string: + return () + version = version_string.strip().split(".") + return tuple(int(v) if v.isdigit() else 999 for v in version) + + +class ElasticsearchTestCase(TestCase): + @staticmethod + def _get_client(): + return get_test_client() + + @classmethod + def setUpClass(cls): + super(ElasticsearchTestCase, cls).setUpClass() + cls.client = cls._get_client() + + def tearDown(self): + super(ElasticsearchTestCase, self).tearDown() + + # Hidden indices expanded in wildcards in ES 7.7 + expand_wildcards = ["open", "closed"] + if self.es_version >= (7, 7): + expand_wildcards.append("hidden") + + self.client.indices.delete( + index="*", ignore=404, expand_wildcards=expand_wildcards + ) + self.client.indices.delete_template(name="*", ignore=404) + + @property + def es_version(self): + if not hasattr(self, "_es_version"): + version_string = self.client.info()["version"]["number"] + self._es_version = _get_version(version_string) + return self._es_version diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/serializer.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/serializer.py new file mode 100755 index 000000000..2ab3191da --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/serializer.py @@ -0,0 +1,139 @@ +try: + import simplejson as json +except ImportError: + import json + +import uuid +from datetime import date, datetime +from decimal import Decimal + +from .exceptions import SerializationError, ImproperlyConfigured +from .compat import string_types + +INTEGER_TYPES = () +FLOAT_TYPES = (Decimal,) +TIME_TYPES = (date, datetime) + +try: + import numpy as np + + INTEGER_TYPES += ( + np.int_, + np.intc, + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + ) + FLOAT_TYPES += ( + np.float_, + np.float16, + np.float32, + np.float64, + ) +except ImportError: + np = None + +try: + import pandas as pd + + TIME_TYPES += (pd.Timestamp,) +except ImportError: + pd = None + + +class TextSerializer(object): + mimetype = "text/plain" + + def loads(self, s): + return s + + def dumps(self, data): + if isinstance(data, string_types): + return data + + raise 
SerializationError("Cannot serialize %r into text." % data) + + +class JSONSerializer(object): + mimetype = "application/json" + + def default(self, data): + if isinstance(data, TIME_TYPES): + return data.isoformat() + elif isinstance(data, uuid.UUID): + return str(data) + elif isinstance(data, FLOAT_TYPES): + return float(data) + elif INTEGER_TYPES and isinstance(data, INTEGER_TYPES): + return int(data) + + # Special cases for numpy and pandas types + elif np: + if isinstance(data, np.bool_): + return bool(data) + elif isinstance(data, np.datetime64): + return data.item().isoformat() + elif isinstance(data, np.ndarray): + return data.tolist() + if pd: + if isinstance(data, (pd.Series, pd.Categorical)): + return data.tolist() + elif hasattr(pd, "NA") and pd.isna(data): + return None + + raise TypeError("Unable to serialize %r (type: %s)" % (data, type(data))) + + def loads(self, s): + try: + return json.loads(s) + except (ValueError, TypeError) as e: + raise SerializationError(s, e) + + def dumps(self, data): + # don't serialize strings + if isinstance(data, string_types): + return data + + try: + return json.dumps( + data, default=self.default, ensure_ascii=False, separators=(",", ":") + ) + except (ValueError, TypeError) as e: + raise SerializationError(data, e) + + +DEFAULT_SERIALIZERS = { + JSONSerializer.mimetype: JSONSerializer(), + TextSerializer.mimetype: TextSerializer(), +} + + +class Deserializer(object): + def __init__(self, serializers, default_mimetype="application/json"): + try: + self.default = serializers[default_mimetype] + except KeyError: + raise ImproperlyConfigured( + "Cannot find default serializer (%s)" % default_mimetype + ) + self.serializers = serializers + + def loads(self, s, mimetype=None): + if not mimetype: + deserializer = self.default + else: + # split out charset + mimetype, _, _ = mimetype.partition(";") + try: + deserializer = self.serializers[mimetype] + except KeyError: + raise SerializationError( + "Unknown mimetype, unable to deserialize: %s" % mimetype + ) + + return deserializer.loads(s) diff --git a/Python3.6-ClsToElasticSearch/src/elasticsearch/transport.py b/Python3.6-ClsToElasticSearch/src/elasticsearch/transport.py new file mode 100755 index 000000000..268b5f361 --- /dev/null +++ b/Python3.6-ClsToElasticSearch/src/elasticsearch/transport.py @@ -0,0 +1,403 @@ +import time +from itertools import chain + +from .connection import Urllib3HttpConnection +from .connection_pool import ConnectionPool, DummyConnectionPool +from .serializer import JSONSerializer, Deserializer, DEFAULT_SERIALIZERS +from .exceptions import ( + ConnectionError, + TransportError, + SerializationError, + ConnectionTimeout, +) + + +def get_host_info(node_info, host): + """ + Simple callback that takes the node info from `/_cluster/nodes` and a + parsed connection information and return the connection information. If + `None` is returned this node will be skipped. + + Useful for filtering nodes (by proximity for example) or if additional + information needs to be provided for the :class:`~elasticsearch.Connection` + class. By default master only nodes are filtered out since they shouldn't + typically be used for API operations. + + :arg node_info: node information from `/_cluster/nodes` + :arg host: connection information (host, port) extracted from the node info + """ + # ignore master only nodes + if node_info.get("roles", []) == ["master"]: + return None + return host + + +class Transport(object): + """ + Encapsulation of transport-related to logic. 
Handles instantiation of the + individual connections as well as creating a connection pool to hold them. + + Main interface is the `perform_request` method. + """ + + def __init__( + self, + hosts, + connection_class=Urllib3HttpConnection, + connection_pool_class=ConnectionPool, + host_info_callback=get_host_info, + sniff_on_start=False, + sniffer_timeout=None, + sniff_timeout=0.1, + sniff_on_connection_fail=False, + serializer=JSONSerializer(), + serializers=None, + default_mimetype="application/json", + max_retries=3, + retry_on_status=(502, 503, 504), + retry_on_timeout=False, + send_get_body_as="GET", + **kwargs + ): + """ + :arg hosts: list of dictionaries, each containing keyword arguments to + create a `connection_class` instance + :arg connection_class: subclass of :class:`~elasticsearch.Connection` to use + :arg connection_pool_class: subclass of :class:`~elasticsearch.ConnectionPool` to use + :arg host_info_callback: callback responsible for taking the node information from + `/_cluster/nodes`, along with already extracted information, and + producing a list of arguments (same as `hosts` parameter) + :arg sniff_on_start: flag indicating whether to obtain a list of nodes + from the cluster at startup time + :arg sniffer_timeout: number of seconds between automatic sniffs + :arg sniff_on_connection_fail: flag controlling if connection failure triggers a sniff + :arg sniff_timeout: timeout used for the sniff request - it should be a + fast api call and we are talking potentially to more nodes so we want + to fail quickly. Not used during initial sniffing (if + ``sniff_on_start`` is on) when the connection still isn't + initialized. + :arg serializer: serializer instance + :arg serializers: optional dict of serializer instances that will be + used for deserializing data coming from the server. (key is the mimetype) + :arg default_mimetype: when no mimetype is specified by the server + response assume this mimetype, defaults to `'application/json'` + :arg max_retries: maximum number of retries before an exception is propagated + :arg retry_on_status: set of HTTP status codes on which we should retry + on a different node. defaults to ``(502, 503, 504)`` + :arg retry_on_timeout: should timeout trigger a retry on different + node? (default `False`) + :arg send_get_body_as: for GET requests with body this option allows + you to specify an alternate way of execution for environments that + don't support passing bodies with GET requests. If you set this to + 'POST' a POST method will be used instead, if to 'source' then the body + will be serialized and passed as a query parameter `source`. + + Any extra keyword arguments will be passed to the `connection_class` + when creating and instance unless overridden by that connection's + options provided as part of the hosts parameter. + """ + + # serialization config + _serializers = DEFAULT_SERIALIZERS.copy() + # if a serializer has been specified, use it for deserialization as well + _serializers[serializer.mimetype] = serializer + # if custom serializers map has been supplied, override the defaults with it + if serializers: + _serializers.update(serializers) + # create a deserializer with our config + self.deserializer = Deserializer(_serializers, default_mimetype) + + self.max_retries = max_retries + self.retry_on_timeout = retry_on_timeout + self.retry_on_status = retry_on_status + self.send_get_body_as = send_get_body_as + + # data serializer + self.serializer = serializer + + # store all strategies... 
+ self.connection_pool_class = connection_pool_class + self.connection_class = connection_class + + # ...save kwargs to be passed to the connections + self.kwargs = kwargs + self.hosts = hosts + + # ...and instantiate them + self.set_connections(hosts) + # retain the original connection instances for sniffing + self.seed_connections = self.connection_pool.connections[:] + + # Don't enable sniffing on Cloud instances. + if kwargs.get("cloud_id", False): + sniff_on_start = False + sniff_on_connection_fail = False + + # sniffing data + self.sniffer_timeout = sniffer_timeout + self.sniff_on_connection_fail = sniff_on_connection_fail + self.last_sniff = time.time() + self.sniff_timeout = sniff_timeout + + # callback to construct host dict from data in /_cluster/nodes + self.host_info_callback = host_info_callback + + if sniff_on_start: + self.sniff_hosts(True) + + def add_connection(self, host): + """ + Create a new :class:`~elasticsearch.Connection` instance and add it to the pool. + + :arg host: kwargs that will be used to create the instance + """ + self.hosts.append(host) + self.set_connections(self.hosts) + + def set_connections(self, hosts): + """ + Instantiate all the connections and create new connection pool to hold them. + Tries to identify unchanged hosts and re-use existing + :class:`~elasticsearch.Connection` instances. + + :arg hosts: same as `__init__` + """ + # construct the connections + def _create_connection(host): + # if this is not the initial setup look at the existing connection + # options and identify connections that haven't changed and can be + # kept around. + if hasattr(self, "connection_pool"): + for (connection, old_host) in self.connection_pool.connection_opts: + if old_host == host: + return connection + + # previously unseen params, create new connection + kwargs = self.kwargs.copy() + kwargs.update(host) + return self.connection_class(**kwargs) + + connections = map(_create_connection, hosts) + + connections = list(zip(connections, hosts)) + if len(connections) == 1: + self.connection_pool = DummyConnectionPool(connections) + else: + # pass the hosts dicts to the connection pool to optionally extract parameters from + self.connection_pool = self.connection_pool_class( + connections, **self.kwargs + ) + + def get_connection(self): + """ + Retrieve a :class:`~elasticsearch.Connection` instance from the + :class:`~elasticsearch.ConnectionPool` instance. + """ + if self.sniffer_timeout: + if time.time() >= self.last_sniff + self.sniffer_timeout: + self.sniff_hosts() + return self.connection_pool.get_connection() + + def _get_sniff_data(self, initial=False): + """ + Perform the request to get sniffing information. Returns a list of + dictionaries (one per node) containing all the information from the + cluster. + + It also sets the last_sniff attribute in case of a successful attempt. + + In rare cases it might be possible to override this method in your + custom Transport class to serve data from alternative source like + configuration management. 
+ """ + previous_sniff = self.last_sniff + + try: + # reset last_sniff timestamp + self.last_sniff = time.time() + # go through all current connections as well as the + # seed_connections for good measure + for c in chain(self.connection_pool.connections, self.seed_connections): + try: + # use small timeout for the sniffing request, should be a fast api call + _, headers, node_info = c.perform_request( + "GET", + "/_nodes/_all/http", + timeout=self.sniff_timeout if not initial else None, + ) + node_info = self.deserializer.loads( + node_info, headers.get("content-type") + ) + break + except (ConnectionError, SerializationError): + pass + else: + raise TransportError("N/A", "Unable to sniff hosts.") + except Exception: + # keep the previous value on error + self.last_sniff = previous_sniff + raise + + return list(node_info["nodes"].values()) + + def _get_host_info(self, host_info): + host = {} + address = host_info.get("http", {}).get("publish_address") + + # malformed or no address given + if not address or ":" not in address: + return None + + if "/" in address: + # Support 7.x host/ip:port behavior where http.publish_host has been set. + fqdn, ipaddress = address.split("/", 1) + host["host"] = fqdn + _, host["port"] = ipaddress.rsplit(":", 1) + host["port"] = int(host["port"]) + + else: + host["host"], host["port"] = address.rsplit(":", 1) + host["port"] = int(host["port"]) + + return self.host_info_callback(host_info, host) + + def sniff_hosts(self, initial=False): + """ + Obtain a list of nodes from the cluster and create a new connection + pool using the information retrieved. + + To extract the node connection parameters use the ``nodes_to_host_callback``. + + :arg initial: flag indicating if this is during startup + (``sniff_on_start``), ignore the ``sniff_timeout`` if ``True`` + """ + node_info = self._get_sniff_data(initial) + + hosts = list(filter(None, (self._get_host_info(n) for n in node_info))) + + # we weren't able to get any nodes or host_info_callback blocked all - + # raise error. + if not hosts: + raise TransportError( + "N/A", "Unable to sniff hosts - no viable hosts found." + ) + + self.set_connections(hosts) + + def mark_dead(self, connection): + """ + Mark a connection as dead (failed) in the connection pool. If sniffing + on failure is enabled this will initiate the sniffing process. + + :arg connection: instance of :class:`~elasticsearch.Connection` that failed + """ + # mark as dead even when sniffing to avoid hitting this host during the sniff process + self.connection_pool.mark_dead(connection) + if self.sniff_on_connection_fail: + self.sniff_hosts() + + def perform_request(self, method, url, headers=None, params=None, body=None): + """ + Perform the actual request. Retrieve a connection from the connection + pool, pass all the information to it's perform_request method and + return the data. + + If an exception was raised, mark the connection as failed and retry (up + to `max_retries` times). + + If the operation was successful and the connection used was previously + marked as dead, mark it as live, resetting it's failure count. 
+
+        :arg method: HTTP method to use
+        :arg url: absolute url (without host) to target
+        :arg headers: dictionary of headers, will be handed over to the
+            underlying :class:`~elasticsearch.Connection` class
+        :arg params: dictionary of query parameters, will be handed over to the
+            underlying :class:`~elasticsearch.Connection` class for serialization
+        :arg body: body of the request, will be serialized using serializer and
+            passed to the connection
+        """
+        if body is not None:
+            body = self.serializer.dumps(body)
+
+            # some clients or environments don't support sending GET with body
+            if method in ("HEAD", "GET") and self.send_get_body_as != "GET":
+                # send it as post instead
+                if self.send_get_body_as == "POST":
+                    method = "POST"
+
+                # or as source parameter
+                elif self.send_get_body_as == "source":
+                    if params is None:
+                        params = {}
+                    params["source"] = body
+                    body = None
+
+        if body is not None:
+            try:
+                body = body.encode("utf-8", "surrogatepass")
+            except (UnicodeDecodeError, AttributeError):
+                # bytes/str - no need to re-encode
+                pass
+
+        ignore = ()
+        timeout = None
+        if params:
+            timeout = params.pop("request_timeout", None)
+            ignore = params.pop("ignore", ())
+            if isinstance(ignore, int):
+                ignore = (ignore,)
+
+        for attempt in range(self.max_retries + 1):
+            connection = self.get_connection()
+
+            try:
+                status, headers_response, data = connection.perform_request(
+                    method,
+                    url,
+                    params,
+                    body,
+                    headers=headers,
+                    ignore=ignore,
+                    timeout=timeout,
+                )
+
+            except TransportError as e:
+                if method == "HEAD" and e.status_code == 404:
+                    return False
+
+                retry = False
+                if isinstance(e, ConnectionTimeout):
+                    retry = self.retry_on_timeout
+                elif isinstance(e, ConnectionError):
+                    retry = True
+                elif e.status_code in self.retry_on_status:
+                    retry = True
+
+                if retry:
+                    # only mark as dead if we are retrying
+                    self.mark_dead(connection)
+                    # raise exception on last retry
+                    if attempt == self.max_retries:
+                        raise
+                else:
+                    raise
+
+            else:
+                # connection didn't fail, confirm its live status
+                self.connection_pool.mark_live(connection)
+
+                if method == "HEAD":
+                    return 200 <= status < 300
+
+                if data:
+                    data = self.deserializer.loads(
+                        data, headers_response.get("content-type")
+                    )
+                return data
+
+    def close(self):
+        """
+        Explicitly closes connections
+        """
+        self.connection_pool.close()
diff --git a/Python3.6-ClsToElasticSearch/src/index.py b/Python3.6-ClsToElasticSearch/src/index.py
new file mode 100644
index 000000000..b7c2b6c2e
--- /dev/null
+++ b/Python3.6-ClsToElasticSearch/src/index.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf8 -*-
+import base64
+import gzip
+import json
+import logging
+import os
+
+from elasticsearch import Elasticsearch
+from elasticsearch.helpers import bulk
+
+# Required parameters
+ES_ADDRESS = os.getenv('ES_ADDRESS')
+ES_USER = os.getenv('ES_USER')
+ES_PASSWORD = os.getenv('ES_PASSWORD')
+ES_API_KEY = os.getenv('ES_API_KEY')
+ES_INDEX = os.getenv('ES_INDEX')
+
+# Logging setup
+logger = logging.getLogger()
+logger.setLevel(logging.DEBUG)  # log level
+
+# Build the ES client
+# es = Elasticsearch([ES_ADDRESS], api_key=ES_API_KEY)
+es = Elasticsearch([ES_ADDRESS], http_auth=(ES_USER, ES_PASSWORD))
+
+
+# Write to ES
+def write_data_to_es(content):
+    try:
+        records = content['records']
+        actions = []
+        for record in records:
+            action = {
+                "_index": ES_INDEX,
+                "_type": "_doc",
+                "_source": record
+            }
+            actions.append(action)
+        bulk(es, actions, index=ES_INDEX)
+    except Exception as e:
+        logger.error("Error occurred when writing to es: %s", e)
+        raise
+
+
+def main_handler(event, context):
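+    # CLS delivers each log batch gzip-compressed and base64-encoded under
+    # event['clslogs']['data']; decode it back into JSON before indexing.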
+    logger.debug("start main_handler")
+    logger.info(event)
+    decoded = base64.b64decode(event['clslogs']['data'])
+    data = gzip.decompress(decoded).decode()
+    logger.debug(data)
+    write_data_to_es(json.loads(data))
+
+    return 'success'
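
A quick way to exercise the handler locally (a reviewer sketch, not part of the patch): build an event in the shape index.py expects, a gzip-compressed, base64-encoded JSON object with a `records` list. The record fields below are hypothetical placeholders, and the ES_ADDRESS/ES_USER/ES_PASSWORD/ES_INDEX environment variables must point at a reachable cluster before importing index.py and running the bulk write.

    import base64
    import gzip
    import json

    import index  # the handler module added above

    # hypothetical CLS-style payload; real records carry whatever fields the log topic defines
    payload = {"records": [{"content": "hello world", "timestamp": "1648112777"}]}
    compressed = gzip.compress(json.dumps(payload).encode("utf-8"))
    event = {"clslogs": {"data": base64.b64encode(compressed)}}

    print(index.main_handler(event, None))  # -> 'success' once the batch is indexed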