venv added, updated
@@ -0,0 +1,56 @@
# flake8: noqa

"""
InfluxDB OSS API Service.

The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint.  # noqa: E501

OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

# import apis into api package
from influxdb_client.service.authorizations_service import AuthorizationsService
from influxdb_client.service.backup_service import BackupService
from influxdb_client.service.bucket_schemas_service import BucketSchemasService
from influxdb_client.service.buckets_service import BucketsService
from influxdb_client.service.cells_service import CellsService
from influxdb_client.service.checks_service import ChecksService
from influxdb_client.service.config_service import ConfigService
from influxdb_client.service.dbr_ps_service import DBRPsService
from influxdb_client.service.dashboards_service import DashboardsService
from influxdb_client.service.delete_service import DeleteService
from influxdb_client.service.health_service import HealthService
from influxdb_client.service.invokable_scripts_service import InvokableScriptsService
from influxdb_client.service.labels_service import LabelsService
from influxdb_client.service.legacy_authorizations_service import LegacyAuthorizationsService
from influxdb_client.service.metrics_service import MetricsService
from influxdb_client.service.notification_endpoints_service import NotificationEndpointsService
from influxdb_client.service.notification_rules_service import NotificationRulesService
from influxdb_client.service.organizations_service import OrganizationsService
from influxdb_client.service.ping_service import PingService
from influxdb_client.service.query_service import QueryService
from influxdb_client.service.ready_service import ReadyService
from influxdb_client.service.remote_connections_service import RemoteConnectionsService
from influxdb_client.service.replications_service import ReplicationsService
from influxdb_client.service.resources_service import ResourcesService
from influxdb_client.service.restore_service import RestoreService
from influxdb_client.service.routes_service import RoutesService
from influxdb_client.service.rules_service import RulesService
from influxdb_client.service.scraper_targets_service import ScraperTargetsService
from influxdb_client.service.secrets_service import SecretsService
from influxdb_client.service.setup_service import SetupService
from influxdb_client.service.signin_service import SigninService
from influxdb_client.service.signout_service import SignoutService
from influxdb_client.service.sources_service import SourcesService
from influxdb_client.service.tasks_service import TasksService
from influxdb_client.service.telegraf_plugins_service import TelegrafPluginsService
from influxdb_client.service.telegrafs_service import TelegrafsService
from influxdb_client.service.templates_service import TemplatesService
from influxdb_client.service.users_service import UsersService
from influxdb_client.service.variables_service import VariablesService
from influxdb_client.service.views_service import ViewsService
from influxdb_client.service.write_service import WriteService
@@ -0,0 +1,554 @@
"""Common functions for Sync and Async clients."""
from __future__ import absolute_import

import base64
import configparser
import logging
import os
from datetime import datetime, timedelta
from typing import List, Generator, Any, Union, Iterable, AsyncGenerator

from urllib3 import HTTPResponse

from influxdb_client import Configuration, Dialect, Query, OptionStatement, VariableAssignment, Identifier, \
    Expression, BooleanLiteral, IntegerLiteral, FloatLiteral, DateTimeLiteral, UnaryExpression, DurationLiteral, \
    Duration, StringLiteral, ArrayExpression, ImportDeclaration, MemberExpression, MemberAssignment, File, \
    WriteService, QueryService, DeleteService, DeletePredicateRequest
from influxdb_client.client.flux_csv_parser import FluxResponseMetadataMode, FluxCsvParser, FluxSerializationMode
from influxdb_client.client.flux_table import FluxRecord, TableList, CSVIterator
from influxdb_client.client.util.date_utils import get_date_helper
from influxdb_client.client.util.helpers import get_org_query_param
from influxdb_client.client.warnings import MissingPivotFunction
from influxdb_client.client.write.dataframe_serializer import DataframeSerializer
from influxdb_client.rest import _UTF_8_encoding

try:
    import dataclasses

    _HAS_DATACLASS = True
except ModuleNotFoundError:
    _HAS_DATACLASS = False

LOGGERS_NAMES = [
    'influxdb_client.client.influxdb_client',
    'influxdb_client.client.influxdb_client_async',
    'influxdb_client.client.write_api',
    'influxdb_client.client.write_api_async',
    'influxdb_client.client.write.retry',
    'influxdb_client.client.write.dataframe_serializer',
    'influxdb_client.client.util.multiprocessing_helper',
    'influxdb_client.client.http',
    'influxdb_client.client.exceptions',
]


# noinspection PyMethodMayBeStatic
class _BaseClient(object):
    def __init__(self, url, token, debug=None, timeout=10_000, enable_gzip=False, org: str = None,
                 default_tags: dict = None, http_client_logger: str = None, **kwargs) -> None:
        self.url = url
        self.token = token
        self.org = org

        self.default_tags = default_tags

        self.conf = _Configuration()
        if self.url.endswith("/"):
            self.conf.host = self.url[:-1]
        else:
            self.conf.host = self.url
        self.conf.enable_gzip = enable_gzip
        self.conf.verify_ssl = kwargs.get('verify_ssl', True)
        self.conf.ssl_ca_cert = kwargs.get('ssl_ca_cert', None)
        self.conf.cert_file = kwargs.get('cert_file', None)
        self.conf.cert_key_file = kwargs.get('cert_key_file', None)
        self.conf.cert_key_password = kwargs.get('cert_key_password', None)
        self.conf.ssl_context = kwargs.get('ssl_context', None)
        self.conf.proxy = kwargs.get('proxy', None)
        self.conf.proxy_headers = kwargs.get('proxy_headers', None)
        self.conf.connection_pool_maxsize = kwargs.get('connection_pool_maxsize', self.conf.connection_pool_maxsize)
        self.conf.timeout = timeout
        # logging
        self.conf.loggers["http_client_logger"] = logging.getLogger(http_client_logger)
        for client_logger in LOGGERS_NAMES:
            self.conf.loggers[client_logger] = logging.getLogger(client_logger)
        self.conf.debug = debug

        self.conf.username = kwargs.get('username', None)
        self.conf.password = kwargs.get('password', None)
        # defaults
        self.auth_header_name = None
        self.auth_header_value = None
        # by token
        if self.token:
            self.auth_header_name = "Authorization"
            self.auth_header_value = "Token " + self.token
        # by HTTP basic
        auth_basic = kwargs.get('auth_basic', False)
        if auth_basic:
            self.auth_header_name = "Authorization"
            self.auth_header_value = "Basic " + base64.b64encode(token.encode()).decode()
        # by username, password
        if self.conf.username and self.conf.password:
            self.auth_header_name = None
            self.auth_header_value = None

        self.retries = kwargs.get('retries', False)

        self.profilers = kwargs.get('profilers', None)
        pass

    @classmethod
    def _from_config_file(cls, config_file: str = "config.ini", debug=None, enable_gzip=False, **kwargs):
        config = configparser.ConfigParser()
        config_name = kwargs.get('config_name', 'influx2')
        is_json = False
        try:
            config.read(config_file)
        except configparser.ParsingError:
            with open(config_file) as json_file:
                import json
                config = json.load(json_file)
                is_json = True

        def _config_value(key: str):
            value = str(config[key]) if is_json else config[config_name][key]
            return value.strip('"')

        def _has_option(key: str):
            return key in config if is_json else config.has_option(config_name, key)

        def _has_section(key: str):
            return key in config if is_json else config.has_section(key)

        url = _config_value('url')
        token = _config_value('token')

        timeout = None
        if _has_option('timeout'):
            timeout = _config_value('timeout')

        org = None
        if _has_option('org'):
            org = _config_value('org')

        verify_ssl = True
        if _has_option('verify_ssl'):
            verify_ssl = _config_value('verify_ssl')

        ssl_ca_cert = None
        if _has_option('ssl_ca_cert'):
            ssl_ca_cert = _config_value('ssl_ca_cert')

        cert_file = None
        if _has_option('cert_file'):
            cert_file = _config_value('cert_file')

        cert_key_file = None
        if _has_option('cert_key_file'):
            cert_key_file = _config_value('cert_key_file')

        cert_key_password = None
        if _has_option('cert_key_password'):
            cert_key_password = _config_value('cert_key_password')

        connection_pool_maxsize = None
        if _has_option('connection_pool_maxsize'):
            connection_pool_maxsize = _config_value('connection_pool_maxsize')

        auth_basic = False
        if _has_option('auth_basic'):
            auth_basic = _config_value('auth_basic')

        default_tags = None
        if _has_section('tags'):
            if is_json:
                default_tags = config['tags']
            else:
                tags = {k: v.strip('"') for k, v in config.items('tags')}
                default_tags = dict(tags)

        profilers = None
        if _has_option('profilers'):
            profilers = [x.strip() for x in _config_value('profilers').split(',')]

        proxy = None
        if _has_option('proxy'):
            proxy = _config_value('proxy')

        return cls(url, token, debug=debug, timeout=_to_int(timeout), org=org, default_tags=default_tags,
                   enable_gzip=enable_gzip, verify_ssl=_to_bool(verify_ssl), ssl_ca_cert=ssl_ca_cert,
                   cert_file=cert_file, cert_key_file=cert_key_file, cert_key_password=cert_key_password,
                   connection_pool_maxsize=_to_int(connection_pool_maxsize), auth_basic=_to_bool(auth_basic),
                   profilers=profilers, proxy=proxy, **kwargs)

    @classmethod
    def _from_env_properties(cls, debug=None, enable_gzip=False, **kwargs):
        url = os.getenv('INFLUXDB_V2_URL', "http://localhost:8086")
        token = os.getenv('INFLUXDB_V2_TOKEN', "my-token")
        timeout = os.getenv('INFLUXDB_V2_TIMEOUT', "10000")
        org = os.getenv('INFLUXDB_V2_ORG', "my-org")
        verify_ssl = os.getenv('INFLUXDB_V2_VERIFY_SSL', "True")
        ssl_ca_cert = os.getenv('INFLUXDB_V2_SSL_CA_CERT', None)
        cert_file = os.getenv('INFLUXDB_V2_CERT_FILE', None)
        cert_key_file = os.getenv('INFLUXDB_V2_CERT_KEY_FILE', None)
        cert_key_password = os.getenv('INFLUXDB_V2_CERT_KEY_PASSWORD', None)
        connection_pool_maxsize = os.getenv('INFLUXDB_V2_CONNECTION_POOL_MAXSIZE', None)
        auth_basic = os.getenv('INFLUXDB_V2_AUTH_BASIC', "False")

        prof = os.getenv("INFLUXDB_V2_PROFILERS", None)
        profilers = None
        if prof is not None:
            profilers = [x.strip() for x in prof.split(',')]

        default_tags = dict()

        for key, value in os.environ.items():
            if key.startswith("INFLUXDB_V2_TAG_"):
                default_tags[key[16:].lower()] = value

        return cls(url, token, debug=debug, timeout=_to_int(timeout), org=org, default_tags=default_tags,
                   enable_gzip=enable_gzip, verify_ssl=_to_bool(verify_ssl), ssl_ca_cert=ssl_ca_cert,
                   cert_file=cert_file, cert_key_file=cert_key_file, cert_key_password=cert_key_password,
                   connection_pool_maxsize=_to_int(connection_pool_maxsize), auth_basic=_to_bool(auth_basic),
                   profilers=profilers, **kwargs)


# noinspection PyMethodMayBeStatic
class _BaseQueryApi(object):
    default_dialect = Dialect(header=True, delimiter=",", comment_prefix="#",
                              annotations=["datatype", "group", "default"], date_time_format="RFC3339")

    def __init__(self, influxdb_client, query_options=None):
        from influxdb_client.client.query_api import QueryOptions
        self._query_options = QueryOptions() if query_options is None else query_options
        self._influxdb_client = influxdb_client
        self._query_api = QueryService(influxdb_client.api_client)

    """Base implementation for Queryable API."""

    def _to_tables(self, response, query_options=None, response_metadata_mode:
                   FluxResponseMetadataMode = FluxResponseMetadataMode.full) -> TableList:
        """
        Parse HTTP response to TableList.

        :param response: HTTP response from an HTTP client. Expected type: `urllib3.response.HTTPResponse`.
        """
        _parser = self._to_tables_parser(response, query_options, response_metadata_mode)
        list(_parser.generator())
        return _parser.table_list()

    async def _to_tables_async(self, response, query_options=None, response_metadata_mode:
                               FluxResponseMetadataMode = FluxResponseMetadataMode.full) -> TableList:
        """
        Parse HTTP response to TableList.

        :param response: HTTP response from an HTTP client. Expected type: `aiohttp.client_reqrep.ClientResponse`.
        """
        async with self._to_tables_parser(response, query_options, response_metadata_mode) as parser:
            async for _ in parser.generator_async():
                pass
        return parser.table_list()

    def _to_csv(self, response: HTTPResponse) -> CSVIterator:
        """Parse HTTP response to CSV."""
        return CSVIterator(response)

    def _to_flux_record_stream(self, response, query_options=None,
                               response_metadata_mode: FluxResponseMetadataMode = FluxResponseMetadataMode.full) -> \
            Generator[FluxRecord, Any, None]:
        """
        Parse HTTP response to FluxRecord stream.

        :param response: HTTP response from an HTTP client. Expected type: `urllib3.response.HTTPResponse`.
        """
        _parser = self._to_flux_record_stream_parser(query_options, response, response_metadata_mode)
        return _parser.generator()

    async def _to_flux_record_stream_async(self, response, query_options=None, response_metadata_mode:
                                           FluxResponseMetadataMode = FluxResponseMetadataMode.full) -> \
            AsyncGenerator['FluxRecord', None]:
        """
        Parse HTTP response to FluxRecord stream.

        :param response: HTTP response from an HTTP client. Expected type: `aiohttp.client_reqrep.ClientResponse`.
        """
        _parser = self._to_flux_record_stream_parser(query_options, response, response_metadata_mode)
        return (await _parser.__aenter__()).generator_async()

    def _to_data_frame_stream(self, data_frame_index, response, query_options=None,
                              response_metadata_mode: FluxResponseMetadataMode = FluxResponseMetadataMode.full,
                              use_extension_dtypes=False):
        """
        Parse HTTP response to DataFrame stream.

        :param response: HTTP response from an HTTP client. Expected type: `urllib3.response.HTTPResponse`.
        """
        _parser = self._to_data_frame_stream_parser(data_frame_index, query_options, response, response_metadata_mode,
                                                    use_extension_dtypes)
        return _parser.generator()

    async def _to_data_frame_stream_async(self, data_frame_index, response, query_options=None, response_metadata_mode:
                                          FluxResponseMetadataMode = FluxResponseMetadataMode.full,
                                          use_extension_dtypes=False):
        """
        Parse HTTP response to DataFrame stream.

        :param response: HTTP response from an HTTP client. Expected type: `aiohttp.client_reqrep.ClientResponse`.
        """
        _parser = self._to_data_frame_stream_parser(data_frame_index, query_options, response, response_metadata_mode,
                                                    use_extension_dtypes)
        return (await _parser.__aenter__()).generator_async()

    def _to_tables_parser(self, response, query_options, response_metadata_mode):
        return FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.tables,
                             query_options=query_options, response_metadata_mode=response_metadata_mode)

    def _to_flux_record_stream_parser(self, query_options, response, response_metadata_mode):
        return FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.stream,
                             query_options=query_options, response_metadata_mode=response_metadata_mode)

    def _to_data_frame_stream_parser(self, data_frame_index, query_options, response, response_metadata_mode,
                                     use_extension_dtypes):
        return FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.dataFrame,
                             data_frame_index=data_frame_index, query_options=query_options,
                             response_metadata_mode=response_metadata_mode,
                             use_extension_dtypes=use_extension_dtypes)

    def _to_data_frames(self, _generator):
        """Parse stream of DataFrames into expected type."""
        from ..extras import pd
        if isinstance(_generator, list):
            _dataFrames = _generator
        else:
            _dataFrames = list(_generator)

        if len(_dataFrames) == 0:
            return pd.DataFrame(columns=[], index=None)
        elif len(_dataFrames) == 1:
            return _dataFrames[0]
        else:
            return _dataFrames

    def _org_param(self, org):
        return get_org_query_param(org=org, client=self._influxdb_client)

    def _get_query_options(self):
        if self._query_options and self._query_options.profilers:
            return self._query_options
        elif self._influxdb_client.profilers:
            from influxdb_client.client.query_api import QueryOptions
            return QueryOptions(profilers=self._influxdb_client.profilers)

    def _create_query(self, query, dialect=default_dialect, params: dict = None, **kwargs):
        query_options = self._get_query_options()
        profilers = query_options.profilers if query_options is not None else None
        q = Query(query=query, dialect=dialect, extern=_BaseQueryApi._build_flux_ast(params, profilers))

        if profilers:
            print("\n===============")
            print("Profiler: query")
            print("===============")
            print(query)

        if kwargs.get('dataframe_query', False):
            MissingPivotFunction.print_warning(query)

        return q

    @staticmethod
    def _params_to_extern_ast(params: dict) -> List['OptionStatement']:

        statements = []
        for key, value in params.items():
            expression = _BaseQueryApi._parm_to_extern_ast(value)
            if expression is None:
                continue

            statements.append(OptionStatement("OptionStatement",
                                              VariableAssignment("VariableAssignment", Identifier("Identifier", key),
                                                                 expression)))
        return statements

    @staticmethod
    def _parm_to_extern_ast(value) -> Union[Expression, None]:
        if value is None:
            return None
        if isinstance(value, bool):
            return BooleanLiteral("BooleanLiteral", value)
        elif isinstance(value, int):
            return IntegerLiteral("IntegerLiteral", str(value))
        elif isinstance(value, float):
            return FloatLiteral("FloatLiteral", value)
        elif isinstance(value, datetime):
            value = get_date_helper().to_utc(value)
            nanoseconds = getattr(value, 'nanosecond', 0)
            fraction = f'{(value.microsecond * 1000 + nanoseconds):09d}'
            return DateTimeLiteral("DateTimeLiteral", value.strftime('%Y-%m-%dT%H:%M:%S.') + fraction + 'Z')
        elif isinstance(value, timedelta):
            _micro_delta = int(value / timedelta(microseconds=1))
            if _micro_delta < 0:
                return UnaryExpression("UnaryExpression", argument=DurationLiteral("DurationLiteral", [
                    Duration(magnitude=-_micro_delta, unit="us")]), operator="-")
            else:
                return DurationLiteral("DurationLiteral", [Duration(magnitude=_micro_delta, unit="us")])
        elif isinstance(value, str):
            return StringLiteral("StringLiteral", str(value))
        elif isinstance(value, Iterable):
            return ArrayExpression("ArrayExpression",
                                   elements=list(map(lambda it: _BaseQueryApi._parm_to_extern_ast(it), value)))
        else:
            return value

    @staticmethod
    def _build_flux_ast(params: dict = None, profilers: List[str] = None):

        imports = []
        body = []

        if profilers is not None and len(profilers) > 0:
            imports.append(ImportDeclaration(
                "ImportDeclaration",
                path=StringLiteral("StringLiteral", "profiler")))

            elements = []
            for profiler in profilers:
                elements.append(StringLiteral("StringLiteral", value=profiler))

            member = MemberExpression(
                "MemberExpression",
                object=Identifier("Identifier", "profiler"),
                _property=Identifier("Identifier", "enabledProfilers"))

            prof = OptionStatement(
                "OptionStatement",
                assignment=MemberAssignment(
                    "MemberAssignment",
                    member=member,
                    init=ArrayExpression(
                        "ArrayExpression",
                        elements=elements)))

            body.append(prof)

        if params is not None:
            body.extend(_BaseQueryApi._params_to_extern_ast(params))

        return File(package=None, name=None, type=None, imports=imports, body=body)


class _BaseWriteApi(object):
    def __init__(self, influxdb_client, point_settings=None):
        self._influxdb_client = influxdb_client
        self._point_settings = point_settings
        self._write_service = WriteService(influxdb_client.api_client)
        if influxdb_client.default_tags:
            for key, value in influxdb_client.default_tags.items():
                self._point_settings.add_default_tag(key, value)

    def _append_default_tag(self, key, val, record):
        from influxdb_client import Point
        if isinstance(record, bytes) or isinstance(record, str):
            pass
        elif isinstance(record, Point):
            record.tag(key, val)
        elif isinstance(record, dict):
            record.setdefault("tags", {})
            record.get("tags")[key] = val
        elif isinstance(record, Iterable):
            for item in record:
                self._append_default_tag(key, val, item)

    def _append_default_tags(self, record):
        if self._point_settings.defaultTags and record is not None:
            for key, val in self._point_settings.defaultTags.items():
                self._append_default_tag(key, val, record)

    def _serialize(self, record, write_precision, payload, **kwargs):
        from influxdb_client import Point
        if isinstance(record, bytes):
            payload[write_precision].append(record)

        elif isinstance(record, str):
            self._serialize(record.encode(_UTF_8_encoding), write_precision, payload, **kwargs)

        elif isinstance(record, Point):
            precision_from_point = kwargs.get('precision_from_point', True)
            precision = record.write_precision if precision_from_point else write_precision
            self._serialize(record.to_line_protocol(precision=precision), precision, payload, **kwargs)

        elif isinstance(record, dict):
            self._serialize(Point.from_dict(record, write_precision=write_precision, **kwargs),
                            write_precision, payload, **kwargs)
        elif 'DataFrame' in type(record).__name__:
            serializer = DataframeSerializer(record, self._point_settings, write_precision, **kwargs)
            self._serialize(serializer.serialize(), write_precision, payload, **kwargs)
        elif hasattr(record, "_asdict"):
            # noinspection PyProtectedMember
            self._serialize(record._asdict(), write_precision, payload, **kwargs)
        elif _HAS_DATACLASS and dataclasses.is_dataclass(record):
            self._serialize(dataclasses.asdict(record), write_precision, payload, **kwargs)
        elif isinstance(record, Iterable):
            for item in record:
                self._serialize(item, write_precision, payload, **kwargs)


# noinspection PyMethodMayBeStatic
class _BaseDeleteApi(object):
    def __init__(self, influxdb_client):
        self._influxdb_client = influxdb_client
        self._service = DeleteService(influxdb_client.api_client)

    def _prepare_predicate_request(self, start, stop, predicate):
        date_helper = get_date_helper()
        if isinstance(start, datetime):
            start = date_helper.to_utc(start)
        if isinstance(stop, datetime):
            stop = date_helper.to_utc(stop)
        predicate_request = DeletePredicateRequest(start=start, stop=stop, predicate=predicate)
        return predicate_request


class _Configuration(Configuration):
    def __init__(self):
        Configuration.__init__(self)
        self.enable_gzip = False
        self.username = None
        self.password = None

    def update_request_header_params(self, path: str, params: dict):
        super().update_request_header_params(path, params)
        if self.enable_gzip:
            # GZIP Request
            if path == '/api/v2/write':
                params["Content-Encoding"] = "gzip"
                params["Accept-Encoding"] = "identity"
                pass
            # GZIP Response
            if path == '/api/v2/query':
                # params["Content-Encoding"] = "gzip"
                params["Accept-Encoding"] = "gzip"
                pass
            pass
        pass

    def update_request_body(self, path: str, body):
        _body = super().update_request_body(path, body)
        if self.enable_gzip:
            # GZIP Request
            if path == '/api/v2/write':
                import gzip
                if isinstance(_body, bytes):
                    return gzip.compress(data=_body)
                else:
                    return gzip.compress(bytes(_body, _UTF_8_encoding))

        return _body


def _to_bool(bool_value):
    return str(bool_value).lower() in ("yes", "true")


def _to_int(int_value):
    return int(int_value) if int_value is not None else None
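

# Usage sketch (illustrative, not part of the module): the public InfluxDBClient
# wrappers `from_config_file()` and `from_env_properties()` delegate to the
# `_from_config_file()` / `_from_env_properties()` classmethods above.
if __name__ == '__main__':
    from influxdb_client import InfluxDBClient

    # config.ini is assumed to contain an [influx2] section with url, org, token
    # plus any optional keys parsed above (timeout, verify_ssl, proxy, ...).
    with InfluxDBClient.from_config_file(config_file="config.ini") as client:
        print(client.ping())

    # Alternatively, build the client from INFLUXDB_V2_URL, INFLUXDB_V2_TOKEN,
    # INFLUXDB_V2_ORG, INFLUXDB_V2_TAG_* and related environment variables.
    with InfluxDBClient.from_env_properties() as client:
        print(client.ping())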
@@ -0,0 +1,66 @@
class _Page:
    def __init__(self, values, has_next, next_after):
        self.has_next = has_next
        self.values = values
        self.next_after = next_after

    @staticmethod
    def empty():
        return _Page([], False, None)

    @staticmethod
    def initial(after):
        return _Page([], True, after)


class _PageIterator:
    def __init__(self, page: _Page, get_next_page):
        self.page = page
        self.get_next_page = get_next_page

    def __iter__(self):
        return self

    def __next__(self):
        if not self.page.values:
            if self.page.has_next:
                self.page = self.get_next_page(self.page)
            if not self.page.values:
                raise StopIteration
        return self.page.values.pop(0)


class _Paginated:
    def __init__(self, paginated_getter, pluck_page_resources_from_response):
        self.paginated_getter = paginated_getter
        self.pluck_page_resources_from_response = pluck_page_resources_from_response

    def find_iter(self, **kwargs):
        """Iterate over resources with pagination.

        :key str org: The organization name.
        :key str org_id: The organization ID.
        :key str after: The last resource ID to seek from (exclusive).
        :key int limit: the maximum number of items per page
        :return: resources iterator
        """

        def get_next_page(page: _Page):
            return self._find_next_page(page, **kwargs)

        return iter(_PageIterator(_Page.initial(kwargs.get('after')), get_next_page))

    def _find_next_page(self, page: _Page, **kwargs):
        if not page.has_next:
            return _Page.empty()

        kw_args = {**kwargs, 'after': page.next_after} if page.next_after is not None else kwargs
        response = self.paginated_getter(**kw_args)

        resources = self.pluck_page_resources_from_response(response)
        has_next = response.links.next is not None
        last_id = resources[-1].id if resources else None

        return _Page(resources, has_next, last_id)
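

# Usage sketch (hypothetical data, not part of the module): any paginated getter
# that understands an `after` cursor and returns a response exposing `.links.next`
# can be wrapped; BucketsApi.find_buckets_iter() later in this commit does exactly
# this with BucketsService.get_buckets.
if __name__ == '__main__':
    from types import SimpleNamespace

    _STORE = [SimpleNamespace(id=str(i), name=f"bucket_{i}") for i in range(5)]

    def _fake_getter(after=None, limit=2, **kwargs):
        # return `limit` items that follow the `after` cursor, mimicking the REST API
        start = 0 if after is None else [r.id for r in _STORE].index(after) + 1
        chunk = _STORE[start:start + limit]
        links = SimpleNamespace(next="next" if start + limit < len(_STORE) else None)
        return SimpleNamespace(resources=chunk, links=links)

    # lazily fetches the next page whenever the current page is exhausted
    for resource in _Paginated(_fake_getter, lambda r: r.resources).find_iter(limit=2):
        print(resource.name)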
@@ -0,0 +1,134 @@
"""Authorization is about managing the security of your InfluxDB instance."""

from influxdb_client import Authorization, AuthorizationsService, User, Organization


class AuthorizationsApi(object):
    """Implementation for '/api/v2/authorizations' endpoint."""

    def __init__(self, influxdb_client):
        """Initialize defaults."""
        self._influxdb_client = influxdb_client
        self._authorizations_service = AuthorizationsService(influxdb_client.api_client)

    def create_authorization(self, org_id=None, permissions: list = None,
                             authorization: Authorization = None) -> Authorization:
        """
        Create an authorization.

        :type permissions: list of Permission
        :param org_id: organization id
        :param permissions: list of permissions
        :type authorization: authorization object

        """
        if authorization is not None:
            return self._authorizations_service.post_authorizations(authorization_post_request=authorization)

        # if org_id is not None and permissions is not None:
        authorization = Authorization(org_id=org_id, permissions=permissions)
        return self._authorizations_service.post_authorizations(authorization_post_request=authorization)

    def find_authorization_by_id(self, auth_id: str) -> Authorization:
        """
        Find authorization by id.

        :param auth_id: authorization id
        :return: Authorization
        """
        return self._authorizations_service.get_authorizations_id(auth_id=auth_id)

    def find_authorizations(self, **kwargs):
        """
        Get a list of all authorizations.

        :key str user_id: filter authorizations belonging to a user id
        :key str user: filter authorizations belonging to a user name
        :key str org_id: filter authorizations belonging to an org id
        :key str org: filter authorizations belonging to an org name
        :return: Authorizations
        """
        authorizations = self._authorizations_service.get_authorizations(**kwargs)

        return authorizations.authorizations

    def find_authorizations_by_user(self, user: User):
        """
        Find authorization by User.

        :return: Authorization list
        """
        return self.find_authorizations(user_id=user.id)

    def find_authorizations_by_user_id(self, user_id: str):
        """
        Find authorization by user id.

        :return: Authorization list
        """
        return self.find_authorizations(user_id=user_id)

    def find_authorizations_by_user_name(self, user_name: str):
        """
        Find authorization by user name.

        :return: Authorization list
        """
        return self.find_authorizations(user=user_name)

    def find_authorizations_by_org(self, org: Organization):
        """
        Find authorization by organization.

        :return: Authorization list
        """
        if isinstance(org, Organization):
            return self.find_authorizations(org_id=org.id)

    def find_authorizations_by_org_name(self, org_name: str):
        """
        Find authorization by org name.

        :return: Authorization list
        """
        return self.find_authorizations(org=org_name)

    def find_authorizations_by_org_id(self, org_id: str):
        """
        Find authorization by org id.

        :return: Authorization list
        """
        return self.find_authorizations(org_id=org_id)

    def update_authorization(self, auth):
        """
        Update authorization object.

        :param auth:
        :return:
        """
        return self._authorizations_service.patch_authorizations_id(auth_id=auth.id, authorization_update_request=auth)

    def clone_authorization(self, auth) -> Authorization:
        """Clone an authorization."""
        if isinstance(auth, Authorization):
            cloned = Authorization(org_id=auth.org_id, permissions=auth.permissions)
            # cloned.description = auth.description
            # cloned.status = auth.status
            return self.create_authorization(authorization=cloned)

        if isinstance(auth, str):
            authorization = self.find_authorization_by_id(auth)
            return self.clone_authorization(auth=authorization)

        raise ValueError("Invalid argument")

    def delete_authorization(self, auth):
        """Delete an authorization."""
        if isinstance(auth, Authorization):
            return self._authorizations_service.delete_authorizations_id(auth_id=auth.id)

        if isinstance(auth, str):
            return self._authorizations_service.delete_authorizations_id(auth_id=auth)
        raise ValueError("Invalid argument")
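

# Usage sketch (illustrative, not part of the module): creating and cloning an
# authorization; `Permission` and `PermissionResource` are the request models
# exported by `influxdb_client` (assumed available alongside `Authorization`).
if __name__ == '__main__':
    from influxdb_client import InfluxDBClient, Permission, PermissionResource

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        authorizations_api = client.authorizations_api()

        # read/write permissions scoped to all buckets of the organization
        # ("<org-id>" is a placeholder for a real organization id)
        resource = PermissionResource(org_id="<org-id>", type="buckets")
        authorization = authorizations_api.create_authorization(
            org_id="<org-id>",
            permissions=[Permission(action="read", resource=resource),
                         Permission(action="write", resource=resource)])

        # clone_authorization accepts either an Authorization or its id
        cloned = authorizations_api.clone_authorization(authorization.id)
        print(cloned.id)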
@@ -0,0 +1,132 @@
"""
A bucket is a named location where time series data is stored.

All buckets have a retention policy, a duration of time that each data point persists.
A bucket belongs to an organization.
"""
import warnings

from influxdb_client import BucketsService, Bucket, PostBucketRequest, PatchBucketRequest
from influxdb_client.client.util.helpers import get_org_query_param
from influxdb_client.client._pages import _Paginated


class BucketsApi(object):
    """Implementation for '/api/v2/buckets' endpoint."""

    def __init__(self, influxdb_client):
        """Initialize defaults."""
        self._influxdb_client = influxdb_client
        self._buckets_service = BucketsService(influxdb_client.api_client)

    def create_bucket(self, bucket=None, bucket_name=None, org_id=None, retention_rules=None,
                      description=None, org=None) -> Bucket:
        """Create a bucket.

        :param Bucket|PostBucketRequest bucket: bucket to create
        :param bucket_name: bucket name
        :param description: bucket description
        :param org_id: org_id
        :param retention_rules: retention rules array or single BucketRetentionRules
        :param str, Organization org: specifies the organization in which to create the bucket;
                                      takes the ``ID``, ``Name`` or ``Organization``.
                                      If not specified the default value from ``InfluxDBClient.org`` is used.
        :return: Bucket
                 If the method is called asynchronously,
                 returns the request thread.
        """
        if retention_rules is None:
            retention_rules = []

        rules = []

        if isinstance(retention_rules, list):
            rules.extend(retention_rules)
        else:
            rules.append(retention_rules)

        if org_id is not None:
            warnings.warn("org_id is deprecated; use org", DeprecationWarning)

        if bucket is None:
            bucket = PostBucketRequest(name=bucket_name,
                                       retention_rules=rules,
                                       description=description,
                                       org_id=get_org_query_param(org=(org_id if org is None else org),
                                                                  client=self._influxdb_client,
                                                                  required_id=True))

        return self._buckets_service.post_buckets(post_bucket_request=bucket)

    def update_bucket(self, bucket: Bucket) -> Bucket:
        """Update a bucket.

        :param bucket: Bucket update to apply (required)
        :return: Bucket
        """
        request = PatchBucketRequest(name=bucket.name,
                                     description=bucket.description,
                                     retention_rules=bucket.retention_rules)

        return self._buckets_service.patch_buckets_id(bucket_id=bucket.id, patch_bucket_request=request)

    def delete_bucket(self, bucket):
        """Delete a bucket.

        :param bucket: bucket id or Bucket
        :return: Bucket
        """
        if isinstance(bucket, Bucket):
            bucket_id = bucket.id
        else:
            bucket_id = bucket

        return self._buckets_service.delete_buckets_id(bucket_id=bucket_id)

    def find_bucket_by_id(self, id):
        """Find bucket by ID.

        :param id:
        :return:
        """
        return self._buckets_service.get_buckets_id(id)

    def find_bucket_by_name(self, bucket_name):
        """Find bucket by name.

        :param bucket_name: bucket name
        :return: Bucket
        """
        buckets = self._buckets_service.get_buckets(name=bucket_name)

        if len(buckets.buckets) > 0:
            return buckets.buckets[0]
        else:
            return None

    def find_buckets(self, **kwargs):
        """List buckets.

        :key int offset: Offset for pagination
        :key int limit: Limit for pagination
        :key str after: The last resource ID to seek from (exclusive).
                        This is to be used instead of `offset`.
        :key str org: The organization name.
        :key str org_id: The organization ID.
        :key str name: Only returns buckets with a specific name.
        :return: Buckets
        """
        return self._buckets_service.get_buckets(**kwargs)

    def find_buckets_iter(self, **kwargs):
        """Iterate over all buckets with pagination.

        :key str name: Only returns buckets with the specified name
        :key str org: The organization name.
        :key str org_id: The organization ID.
        :key str after: The last resource ID to seek from (exclusive).
        :key int limit: the maximum number of buckets in one page
        :return: Buckets iterator
        """
        return _Paginated(self._buckets_service.get_buckets, lambda response: response.buckets).find_iter(**kwargs)
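

# Usage sketch (illustrative, not part of the module): creating a bucket with a
# retention rule and iterating over all buckets page by page.
if __name__ == '__main__':
    from influxdb_client import InfluxDBClient, BucketRetentionRules

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        buckets_api = client.buckets_api()

        # expire data after one hour; `org` falls back to the client default when omitted
        retention = BucketRetentionRules(type="expire", every_seconds=3600)
        bucket = buckets_api.create_bucket(bucket_name="my-bucket", retention_rules=retention)

        # paginated iteration backed by _Paginated/_PageIterator from _pages.py
        for b in buckets_api.find_buckets_iter(limit=20):
            print(b.name)

        buckets_api.delete_bucket(bucket)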
@@ -0,0 +1,35 @@
"""Delete time series data from InfluxDB."""

from datetime import datetime
from typing import Union

from influxdb_client import Organization
from influxdb_client.client._base import _BaseDeleteApi
from influxdb_client.client.util.helpers import get_org_query_param


class DeleteApi(_BaseDeleteApi):
    """Implementation for '/api/v2/delete' endpoint."""

    def __init__(self, influxdb_client):
        """Initialize defaults."""
        super().__init__(influxdb_client)

    def delete(self, start: Union[str, datetime], stop: Union[str, datetime], predicate: str, bucket: str,
               org: Union[str, Organization, None] = None) -> None:
        """
        Delete time series data from InfluxDB.

        :param str, datetime.datetime start: start time
        :param str, datetime.datetime stop: stop time
        :param str predicate: predicate
        :param str bucket: bucket id or name from which data will be deleted
        :param str, Organization org: specifies the organization to delete data from.
                                      Take the ``ID``, ``Name`` or ``Organization``.
                                      If not specified the default value from ``InfluxDBClient.org`` is used.
        :return:
        """
        predicate_request = self._prepare_predicate_request(start, stop, predicate)
        org_param = get_org_query_param(org=org, client=self._influxdb_client, required_id=False)

        return self._service.post_delete(delete_predicate_request=predicate_request, bucket=bucket, org=org_param)
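

# Usage sketch (illustrative, not part of the module): deleting points by time
# range and tag predicate.
if __name__ == '__main__':
    from datetime import timedelta, timezone
    from influxdb_client import InfluxDBClient

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        delete_api = client.delete_api()

        stop = datetime.now(tz=timezone.utc)
        start = stop - timedelta(days=1)
        # datetime values are normalized to UTC by _prepare_predicate_request()
        delete_api.delete(start, stop, '_measurement="my_measurement"', bucket="my-bucket", org="my-org")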
@@ -0,0 +1,37 @@
"""Delete time series data from InfluxDB."""

from datetime import datetime
from typing import Union

from influxdb_client import Organization
from influxdb_client.client._base import _BaseDeleteApi
from influxdb_client.client.util.helpers import get_org_query_param


class DeleteApiAsync(_BaseDeleteApi):
    """Async implementation for '/api/v2/delete' endpoint."""

    def __init__(self, influxdb_client):
        """Initialize defaults."""
        super().__init__(influxdb_client)

    async def delete(self, start: Union[str, datetime], stop: Union[str, datetime], predicate: str, bucket: str,
                     org: Union[str, Organization, None] = None) -> bool:
        """
        Delete time series data from InfluxDB.

        :param str, datetime.datetime start: start time
        :param str, datetime.datetime stop: stop time
        :param str predicate: predicate
        :param str bucket: bucket id or name from which data will be deleted
        :param str, Organization org: specifies the organization to delete data from.
                                      Take the ``ID``, ``Name`` or ``Organization``.
                                      If not specified the default value from ``InfluxDBClientAsync.org`` is used.
        :return: ``True`` for successfully deleted data, otherwise raise an exception
        """
        predicate_request = self._prepare_predicate_request(start, stop, predicate)
        org_param = get_org_query_param(org=org, client=self._influxdb_client, required_id=False)

        response = await self._service.post_delete_async(delete_predicate_request=predicate_request, bucket=bucket,
                                                          org=org_param, _return_http_data_only=False)
        return response[1] == 204
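

# Usage sketch (illustrative, not part of the module): the async variant returns
# True when the server responds with HTTP 204.
if __name__ == '__main__':
    import asyncio
    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync

    async def main():
        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
            success = await client.delete_api().delete(
                "2020-01-01T00:00:00Z", "2021-01-01T00:00:00Z",
                '_measurement="my_measurement"', bucket="my-bucket", org="my-org")
            print(success)

    asyncio.run(main())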
@@ -0,0 +1,47 @@
"""Exceptions utils for InfluxDB."""

import logging

from urllib3 import HTTPResponse

logger = logging.getLogger('influxdb_client.client.exceptions')


class InfluxDBError(Exception):
    """Raised when a server error occurs."""

    def __init__(self, response: HTTPResponse = None, message: str = None):
        """Initialize the InfluxDBError handler."""
        if response is not None:
            self.response = response
            self.message = self._get_message(response)
            if isinstance(response, HTTPResponse):  # response is HTTPResponse
                self.headers = response.headers
                self.retry_after = response.headers.get('Retry-After')
            else:  # response is RESTResponse
                self.headers = response.getheaders()
                self.retry_after = response.getheader('Retry-After')
        else:
            self.response = None
            self.message = message or 'no response'
            self.retry_after = None
        super().__init__(self.message)

    def _get_message(self, response):
        # Body
        if response.data:
            import json
            try:
                return json.loads(response.data)["message"]
            except Exception as e:
                logging.debug(f"Cannot parse error response to JSON: {response.data}, {e}")
                return response.data

        # Header
        for header_key in ["X-Platform-Error-Code", "X-Influx-Error", "X-InfluxDb-Error"]:
            header_value = response.getheader(header_key)
            if header_value is not None:
                return header_value

        # Http Status
        return response.reason
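

# Usage sketch (illustrative, not part of the module): InfluxDBError exposes the
# parsed server message and the Retry-After header, which retry logic can consume.
if __name__ == '__main__':
    from influxdb_client import InfluxDBClient
    from influxdb_client.client.write_api import SYNCHRONOUS

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        try:
            client.write_api(write_options=SYNCHRONOUS).write(bucket="my-bucket", record="mem,host=h1 used=21.5")
        except InfluxDBError as e:
            print(e.message, e.retry_after)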
@@ -0,0 +1,404 @@
"""Parsing response from InfluxDB to FluxStructures or DataFrame."""


import base64
import codecs
import csv as csv_parser
import warnings
from enum import Enum
from typing import List

from influxdb_client.client.flux_table import FluxTable, FluxColumn, FluxRecord, TableList
from influxdb_client.client.util.date_utils import get_date_helper
from influxdb_client.rest import _UTF_8_encoding

ANNOTATION_DEFAULT = "#default"
ANNOTATION_GROUP = "#group"
ANNOTATION_DATATYPE = "#datatype"
ANNOTATIONS = [ANNOTATION_DEFAULT, ANNOTATION_GROUP, ANNOTATION_DATATYPE]


class FluxQueryException(Exception):
    """The exception from InfluxDB."""

    def __init__(self, message, reference) -> None:
        """Initialize defaults."""
        self.message = message
        self.reference = reference


class FluxCsvParserException(Exception):
    """The exception for not parsable data."""

    pass


class FluxSerializationMode(Enum):
    """The type how we want to serialize data."""

    tables = 1
    stream = 2
    dataFrame = 3


class FluxResponseMetadataMode(Enum):
    """The configuration for expected amount of metadata response from InfluxDB."""

    full = 1
    # useful for Invokable scripts
    only_names = 2


class _FluxCsvParserMetadata(object):
    def __init__(self):
        self.table_index = 0
        self.table_id = -1
        self.start_new_table = False
        self.table = None
        self.groups = []
        self.parsing_state_error = False


class FluxCsvParser(object):
    """Parse the response from InfluxDB into FluxStructures or a DataFrame."""

    def __init__(self, response, serialization_mode: FluxSerializationMode,
                 data_frame_index: List[str] = None, query_options=None,
                 response_metadata_mode: FluxResponseMetadataMode = FluxResponseMetadataMode.full,
                 use_extension_dtypes=False) -> None:
        """
        Initialize defaults.

        :param response: HTTP response from an HTTP client.
                         Acceptable types: `urllib3.response.HTTPResponse`, `aiohttp.client_reqrep.ClientResponse`.
        """
        self._response = response
        self.tables = TableList()
        self._serialization_mode = serialization_mode
        self._response_metadata_mode = response_metadata_mode
        self._use_extension_dtypes = use_extension_dtypes
        self._data_frame_index = data_frame_index
        self._data_frame_values = []
        self._profilers = query_options.profilers if query_options is not None else None
        self._profiler_callback = query_options.profiler_callback if query_options is not None else None
        self._async_mode = True if 'ClientResponse' in type(response).__name__ else False

    def _close(self):
        self._response.close()

    def __enter__(self):
        """Initialize CSV reader."""
        # response can be exhausted by logger, so we have to use data that has already been read
        if hasattr(self._response, 'closed') and self._response.closed:
            from io import StringIO
            self._reader = csv_parser.reader(StringIO(self._response.data.decode(_UTF_8_encoding)))
        else:
            self._reader = csv_parser.reader(codecs.iterdecode(self._response, _UTF_8_encoding))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close HTTP response."""
        self._close()

    async def __aenter__(self) -> 'FluxCsvParser':
        """Initialize CSV reader."""
        from aiocsv import AsyncReader
        self._reader = AsyncReader(_StreamReaderToWithAsyncRead(self._response.content))

        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        """Shutdown the client."""
        self.__exit__(exc_type, exc_val, exc_tb)

    def generator(self):
        """Return Python generator."""
        with self as parser:
            for val in parser._parse_flux_response():
                yield val

    def generator_async(self):
        """Return Python async-generator."""
        return self._parse_flux_response_async()

    def _parse_flux_response(self):
        metadata = _FluxCsvParserMetadata()

        for csv in self._reader:
            for val in self._parse_flux_response_row(metadata, csv):
                yield val

        # Return latest DataFrame
        if (self._serialization_mode is FluxSerializationMode.dataFrame) & hasattr(self, '_data_frame'):
            df = self._prepare_data_frame()
            if not self._is_profiler_table(metadata.table):
                yield df

    async def _parse_flux_response_async(self):
        metadata = _FluxCsvParserMetadata()

        try:
            async for csv in self._reader:
                for val in self._parse_flux_response_row(metadata, csv):
                    yield val

            # Return latest DataFrame
            if (self._serialization_mode is FluxSerializationMode.dataFrame) & hasattr(self, '_data_frame'):
                df = self._prepare_data_frame()
                if not self._is_profiler_table(metadata.table):
                    yield df
        finally:
            self._close()

    def _parse_flux_response_row(self, metadata, csv):
        if len(csv) < 1:
            # Skip empty line in results (new line is used as a delimiter between tables or table and error)
            pass

        elif "error" == csv[1] and "reference" == csv[2]:
            metadata.parsing_state_error = True

        else:
            # Throw InfluxException with error response
            if metadata.parsing_state_error:
                error = csv[1]
                reference_value = csv[2]
                raise FluxQueryException(error, reference_value)

            token = csv[0]
            # start new table
            if (token in ANNOTATIONS and not metadata.start_new_table) or \
                    (self._response_metadata_mode is FluxResponseMetadataMode.only_names and not metadata.table):

                # Return already parsed DataFrame
                if (self._serialization_mode is FluxSerializationMode.dataFrame) & hasattr(self, '_data_frame'):
                    df = self._prepare_data_frame()
                    if not self._is_profiler_table(metadata.table):
                        yield df

                metadata.start_new_table = True
                metadata.table = FluxTable()
                self._insert_table(metadata.table, metadata.table_index)
                metadata.table_index = metadata.table_index + 1
                metadata.table_id = -1
            elif metadata.table is None:
                raise FluxCsvParserException("Unable to parse CSV response. FluxTable definition was not found.")

            # # datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string
            if ANNOTATION_DATATYPE == token:
                self.add_data_types(metadata.table, csv)

            elif ANNOTATION_GROUP == token:
                metadata.groups = csv

            elif ANNOTATION_DEFAULT == token:
                self.add_default_empty_values(metadata.table, csv)

            else:
                # parse column names
                if metadata.start_new_table:
                    # Invokable scripts don't support dialect => all columns are string
                    if not metadata.table.columns and \
                            self._response_metadata_mode is FluxResponseMetadataMode.only_names:
                        self.add_data_types(metadata.table, list(map(lambda column: 'string', csv)))
                        metadata.groups = list(map(lambda column: 'false', csv))
                    self.add_groups(metadata.table, metadata.groups)
                    self.add_column_names_and_tags(metadata.table, csv)
                    metadata.start_new_table = False
                    # Create DataFrame with default values
                    if self._serialization_mode is FluxSerializationMode.dataFrame:
                        from ..extras import pd
                        labels = list(map(lambda it: it.label, metadata.table.columns))
                        self._data_frame = pd.DataFrame(data=[], columns=labels, index=None)
                    pass
                else:

                    # to int conversions todo
                    current_id = int(csv[2])
                    if metadata.table_id == -1:
                        metadata.table_id = current_id

                    if metadata.table_id != current_id:
                        # create new table with previous column headers settings
                        flux_columns = metadata.table.columns
                        metadata.table = FluxTable()
                        metadata.table.columns.extend(flux_columns)
                        self._insert_table(metadata.table, metadata.table_index)
                        metadata.table_index = metadata.table_index + 1
                        metadata.table_id = current_id

                    flux_record = self.parse_record(metadata.table_index - 1, metadata.table, csv)

                    if self._is_profiler_record(flux_record):
                        self._print_profiler_info(flux_record)
                    else:
                        if self._serialization_mode is FluxSerializationMode.tables:
                            self.tables[metadata.table_index - 1].records.append(flux_record)

                        if self._serialization_mode is FluxSerializationMode.stream:
                            yield flux_record

                        if self._serialization_mode is FluxSerializationMode.dataFrame:
                            self._data_frame_values.append(flux_record.values)
                            pass

    def _prepare_data_frame(self):
        from ..extras import pd

        # We have to create temporary DataFrame because we want to preserve default column values
        _temp_df = pd.DataFrame(self._data_frame_values)
        self._data_frame_values = []

        # Custom DataFrame index
        if self._data_frame_index:
            self._data_frame = self._data_frame.set_index(self._data_frame_index)
            _temp_df = _temp_df.set_index(self._data_frame_index)

        # Append data
        df = pd.concat([self._data_frame.astype(_temp_df.dtypes), _temp_df])

        if self._use_extension_dtypes:
            return df.convert_dtypes()
        return df

    def parse_record(self, table_index, table, csv):
        """Parse one record."""
        record = FluxRecord(table_index)

        for fluxColumn in table.columns:
            column_name = fluxColumn.label
            str_val = csv[fluxColumn.index + 1]
            record.values[column_name] = self._to_value(str_val, fluxColumn)
            record.row.append(record.values[column_name])

        return record

    def _to_value(self, str_val, column):

        if str_val == '' or str_val is None:
            default_value = column.default_value
            if default_value == '' or default_value is None:
                if self._serialization_mode is FluxSerializationMode.dataFrame:
                    if self._use_extension_dtypes:
                        from ..extras import pd
                        return pd.NA
                    return None
                return None
            return self._to_value(default_value, column)

        if "string" == column.data_type:
            return str_val

        if "boolean" == column.data_type:
            return "true" == str_val

        if "unsignedLong" == column.data_type or "long" == column.data_type:
            return int(str_val)

        if "double" == column.data_type:
            return float(str_val)

        if "base64Binary" == column.data_type:
            return base64.b64decode(str_val)

        if "dateTime:RFC3339" == column.data_type or "dateTime:RFC3339Nano" == column.data_type:
            return get_date_helper().parse_date(str_val)

        if "duration" == column.data_type:
            # todo better type ?
            return int(str_val)

    @staticmethod
    def add_data_types(table, data_types):
        """Add data types to columns."""
        for index in range(1, len(data_types)):
            column_def = FluxColumn(index=index - 1, data_type=data_types[index])
            table.columns.append(column_def)

    @staticmethod
    def add_groups(table, csv):
        """Add group keys to columns."""
        i = 1
        for column in table.columns:
            column.group = csv[i] == "true"
            i += 1

    @staticmethod
    def add_default_empty_values(table, default_values):
        """Add default values to columns."""
        i = 1
        for column in table.columns:
            column.default_value = default_values[i]
            i += 1

    @staticmethod
    def add_column_names_and_tags(table, csv):
        """Add labels to columns."""
        if len(csv) != len(set(csv)):
            message = f"""The response contains columns with duplicated names: '{csv}'.

You should use the 'record.row' to access your data instead of 'record.values' dictionary.
"""
            warnings.warn(message, UserWarning)
            print(message)
        i = 1
        for column in table.columns:
            column.label = csv[i]
            i += 1

    def _insert_table(self, table, table_index):
        if self._serialization_mode is FluxSerializationMode.tables:
            self.tables.insert(table_index, table)

    def _is_profiler_record(self, flux_record: FluxRecord) -> bool:
        if not self._profilers:
            return False

        for profiler in self._profilers:
            if "_measurement" in flux_record.values and flux_record["_measurement"] == "profiler/" + profiler:
                return True

        return False

    def _is_profiler_table(self, table: FluxTable) -> bool:

        if not self._profilers:
            return False

        return any(filter(lambda column: (column.default_value == "_profiler" and column.label == "result"),
                          table.columns))

    def table_list(self) -> TableList:
        """Get the list of flux tables."""
        if not self._profilers:
            return self.tables
        else:
            return TableList(filter(lambda table: not self._is_profiler_table(table), self.tables))

    def _print_profiler_info(self, flux_record: FluxRecord):
        if flux_record.get_measurement().startswith("profiler/"):
            if self._profiler_callback:
                self._profiler_callback(flux_record)
            else:
                msg = "Profiler: " + flux_record.get_measurement()
                print("\n" + len(msg) * "=")
                print(msg)
                print(len(msg) * "=")
                for name in flux_record.values:
                    val = flux_record[name]
                    if isinstance(val, str) and len(val) > 50:
                        print(f"{name:<20}: \n\n{val}")
                    elif val is not None:
                        print(f"{name:<20}: {val:<20}")


class _StreamReaderToWithAsyncRead:
    def __init__(self, response):
        self.response = response
        self.decoder = codecs.getincrementaldecoder(_UTF_8_encoding)()

    async def read(self, size: int) -> str:
        raw_bytes = (await self.response.read(size))
        if not raw_bytes:
            return self.decoder.decode(b'', final=True)
        return self.decoder.decode(raw_bytes, final=False)
@@ -0,0 +1,290 @@
|
||||
"""
|
||||
Flux employs a basic data model built from basic data types.
|
||||
|
||||
The data model consists of tables, records, columns.
|
||||
"""
|
||||
import codecs
|
||||
import csv
|
||||
from http.client import HTTPResponse
|
||||
from json import JSONEncoder
|
||||
from typing import List, Iterator
|
||||
from influxdb_client.rest import _UTF_8_encoding
|
||||
|
||||
|
||||
class FluxStructure:
|
||||
"""The data model consists of tables, records, columns."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class FluxStructureEncoder(JSONEncoder):
|
||||
"""The FluxStructure encoder to encode query results to JSON."""
|
||||
|
||||
def default(self, obj):
|
||||
"""Return serializable objects for JSONEncoder."""
|
||||
import datetime
|
||||
if isinstance(obj, FluxStructure):
|
||||
return obj.__dict__
|
||||
elif isinstance(obj, (datetime.datetime, datetime.date)):
|
||||
return obj.isoformat()
|
||||
return super().default(obj)
|
||||
|
||||
|
||||
class FluxTable(FluxStructure):
|
||||
"""
|
||||
A table is a set of records with a common set of columns and a group key.
|
||||
|
||||
The table can be serialized into JSON by::
|
||||
|
||||
import json
|
||||
from influxdb_client.client.flux_table import FluxStructureEncoder
|
||||
|
||||
output = json.dumps(tables, cls=FluxStructureEncoder, indent=2)
|
||||
print(output)
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize defaults."""
|
||||
self.columns = []
|
||||
self.records = []
|
||||
|
||||
def get_group_key(self):
|
||||
"""
|
||||
Group key is a list of columns.
|
||||
|
||||
A table’s group key denotes which subset of the entire dataset is assigned to the table.
|
||||
"""
|
||||
return list(filter(lambda column: (column.group is True), self.columns))
|
||||
|
||||
def __str__(self):
|
||||
"""Return formatted output."""
|
||||
cls_name = type(self).__name__
|
||||
return cls_name + "() columns: " + str(len(self.columns)) + ", records: " + str(len(self.records))
|
||||
|
||||
def __repr__(self):
|
||||
"""Format for inspection."""
|
||||
return f"<{type(self).__name__}: {len(self.columns)} columns, {len(self.records)} records>"
|
||||
|
||||
def __iter__(self):
|
||||
"""Iterate over records."""
|
||||
return iter(self.records)
|
||||
|
||||
|
||||
class FluxColumn(FluxStructure):
|
||||
"""A column has a label and a data type."""
|
||||
|
||||
def __init__(self, index=None, label=None, data_type=None, group=None, default_value=None) -> None:
|
||||
"""Initialize defaults."""
|
||||
self.default_value = default_value
|
||||
self.group = group
|
||||
self.data_type = data_type
|
||||
self.label = label
|
||||
self.index = index
|
||||
|
||||
def __repr__(self):
|
||||
"""Format for inspection."""
|
||||
fields = [repr(self.index)] + [
|
||||
f'{name}={getattr(self, name)!r}' for name in (
|
||||
'label', 'data_type', 'group', 'default_value'
|
||||
) if getattr(self, name) is not None
|
||||
]
|
||||
return f"{type(self).__name__}({', '.join(fields)})"
|
||||
|
||||
|
||||
class FluxRecord(FluxStructure):
|
||||
"""A record is a tuple of named values and is represented using an object type."""
|
||||
|
||||
def __init__(self, table, values=None) -> None:
|
||||
"""Initialize defaults."""
|
||||
if values is None:
|
||||
values = {}
|
||||
self.table = table
|
||||
self.values = values
|
||||
self.row = []
|
||||
|
||||
def get_start(self):
|
||||
"""Get '_start' value."""
|
||||
return self["_start"]
|
||||
|
||||
def get_stop(self):
|
||||
"""Get '_stop' value."""
|
||||
return self["_stop"]
|
||||
|
||||
def get_time(self):
|
||||
"""Get timestamp."""
|
||||
return self["_time"]
|
||||
|
||||
def get_value(self):
|
||||
"""Get field value."""
|
||||
return self["_value"]
|
||||
|
||||
def get_field(self):
|
||||
"""Get field name."""
|
||||
return self["_field"]
|
||||
|
||||
def get_measurement(self):
|
||||
"""Get measurement name."""
|
||||
return self["_measurement"]
|
||||
|
||||
def __getitem__(self, key):
|
||||
"""Get value by key."""
|
||||
return self.values.__getitem__(key)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
"""Set value with key and value."""
|
||||
return self.values.__setitem__(key, value)
|
||||
|
||||
def __str__(self):
|
||||
"""Return formatted output."""
|
||||
cls_name = type(self).__name__
|
||||
return cls_name + "() table: " + str(self.table) + ", " + str(self.values)
|
||||
|
||||
def __repr__(self):
|
||||
"""Format for inspection."""
|
||||
return f"<{type(self).__name__}: field={self.values.get('_field')}, value={self.values.get('_value')}>"
|
||||
|
||||
|
||||
class TableList(List[FluxTable]):
|
||||
""":class:`~influxdb_client.client.flux_table.FluxTable` list with additional functionality for easier handling of query results."""  # noqa: E501
|
||||
|
||||
def to_values(self, columns: List['str'] = None) -> List[List[object]]:
|
||||
"""
|
||||
Serialize query results to a flattened list of values.
|
||||
|
||||
:param columns: if not ``None``, only the specified columns are included in the results
|
||||
:return: :class:`~list` of values
|
||||
|
||||
Output example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[
|
||||
['New York', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 24.3],
|
||||
['Prague', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 25.3],
|
||||
...
|
||||
]
|
||||
|
||||
Configure required columns:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
|
||||
# Query: using Table structure
|
||||
tables = client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
|
||||
|
||||
# Serialize to values
|
||||
output = tables.to_values(columns=['location', '_time', '_value'])
|
||||
print(output)
|
||||
"""
|
||||
|
||||
def filter_values(record):
|
||||
if columns is not None:
|
||||
return [record.values.get(k) for k in columns]
|
||||
return record.values.values()
|
||||
|
||||
return self._to_values(filter_values)
|
||||
|
||||
def to_json(self, columns: List['str'] = None, **kwargs) -> str:
|
||||
"""
|
||||
Serialize query results to a JSON formatted :class:`~str`.
|
||||
|
||||
:param columns: if not ``None``, only the specified columns are included in the results
|
||||
:return: :class:`~str`
|
||||
|
||||
The query results are flattened into an array:
|
||||
|
||||
.. code-block:: javascript
|
||||
|
||||
[
|
||||
{
|
||||
"_measurement": "mem",
|
||||
"_start": "2021-06-23T06:50:11.897825+00:00",
|
||||
"_stop": "2021-06-25T06:50:11.897825+00:00",
|
||||
"_time": "2020-02-27T16:20:00.897825+00:00",
|
||||
"region": "north",
|
||||
"_field": "usage",
|
||||
"_value": 15
|
||||
},
|
||||
{
|
||||
"_measurement": "mem",
|
||||
"_start": "2021-06-23T06:50:11.897825+00:00",
|
||||
"_stop": "2021-06-25T06:50:11.897825+00:00",
|
||||
"_time": "2020-02-27T16:20:01.897825+00:00",
|
||||
"region": "west",
|
||||
"_field": "usage",
|
||||
"_value": 10
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
The JSON output can be configured via ``**kwargs`` arguments:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
|
||||
# Query: using Table structure
|
||||
tables = client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
|
||||
|
||||
# Serialize to JSON
|
||||
output = tables.to_json(indent=5)
|
||||
print(output)
|
||||
|
||||
For all available options see - `json.dump <https://docs.python.org/3/library/json.html#json.dump>`_.
|
||||
"""
|
||||
if 'indent' not in kwargs:
|
||||
kwargs['indent'] = 2
|
||||
|
||||
def filter_values(record):
|
||||
if columns is not None:
|
||||
return {k: v for (k, v) in record.values.items() if k in columns}
|
||||
return record.values
|
||||
|
||||
import json
|
||||
return json.dumps(self._to_values(filter_values), cls=FluxStructureEncoder, **kwargs)
|
||||
|
||||
def _to_values(self, mapping):
|
||||
return [mapping(record) for table in self for record in table.records]
|
||||
|
||||
|
||||
class CSVIterator(Iterator[List[str]]):
|
||||
""":class:`Iterator[List[str]]` with additional functionality for easier handling of query results."""
|
||||
|
||||
def __init__(self, response: HTTPResponse) -> None:
|
||||
"""Initialize ``csv.reader``."""
|
||||
self.delegate = csv.reader(codecs.iterdecode(response, _UTF_8_encoding))
|
||||
|
||||
def __iter__(self):
|
||||
"""Return an iterator object."""
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
"""Retrieve the next item from the iterator."""
|
||||
row = self.delegate.__next__()
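# Skip empty rows - annotated CSV separates result tables with blank lines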
|
||||
while not row:
|
||||
row = self.delegate.__next__()
|
||||
return row
|
||||
|
||||
def to_values(self) -> List[List[str]]:
|
||||
"""
|
||||
Serialize query results to a flattened list of values.
|
||||
|
||||
:return: :class:`~list` of values
|
||||
|
||||
Output example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[
|
||||
['New York', '2022-06-14T08:00:51.749072045Z', '24.3'],
|
||||
['Prague', '2022-06-14T08:00:51.749072045Z', '25.3'],
|
||||
...
|
||||
]
|
||||
"""
|
||||
return list(self.__iter__())
|
||||
@@ -0,0 +1,438 @@
|
||||
"""InfluxDBClient is client for API defined in https://github.com/influxdata/influxdb/blob/master/http/swagger.yml."""
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import logging
|
||||
import warnings
|
||||
|
||||
from influxdb_client import HealthCheck, HealthService, Ready, ReadyService, PingService, \
|
||||
InvokableScriptsApi
|
||||
from influxdb_client.client._base import _BaseClient
|
||||
from influxdb_client.client.authorizations_api import AuthorizationsApi
|
||||
from influxdb_client.client.bucket_api import BucketsApi
|
||||
from influxdb_client.client.delete_api import DeleteApi
|
||||
from influxdb_client.client.labels_api import LabelsApi
|
||||
from influxdb_client.client.organizations_api import OrganizationsApi
|
||||
from influxdb_client.client.query_api import QueryApi, QueryOptions
|
||||
from influxdb_client.client.tasks_api import TasksApi
|
||||
from influxdb_client.client.users_api import UsersApi
|
||||
from influxdb_client.client.write_api import WriteApi, WriteOptions, PointSettings
|
||||
|
||||
logger = logging.getLogger('influxdb_client.client.influxdb_client')
|
||||
|
||||
|
||||
class InfluxDBClient(_BaseClient):
|
||||
"""InfluxDBClient is client for InfluxDB v2."""
|
||||
|
||||
def __init__(self, url, token: str = None, debug=None, timeout=10_000, enable_gzip=False, org: str = None,
|
||||
default_tags: dict = None, **kwargs) -> None:
|
||||
"""
|
||||
Initialize defaults.
|
||||
|
||||
:param url: InfluxDB server API url (ex. http://localhost:8086).
|
||||
:param token: ``token`` to authenticate to the InfluxDB API
|
||||
:param debug: enable verbose logging of http requests
|
||||
:param timeout: HTTP client timeout setting for a request specified in milliseconds.
|
||||
If a single number is provided, it is used as the total request timeout.
|
||||
It can also be a pair (tuple) of (connection, read) timeouts.
|
||||
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
|
||||
support Gzip compression.
|
||||
:param org: organization name (used as a default in Query, Write and Delete API)
|
||||
:key bool verify_ssl: Set this to false to skip verifying SSL certificate when calling API from https server.
|
||||
:key str ssl_ca_cert: Set this to customize the certificate file to verify the peer.
|
||||
:key str cert_file: Path to the certificate that will be used for mTLS authentication.
|
||||
:key str cert_key_file: Path to the file containing the private key for the mTLS certificate.
|
||||
:key str cert_key_password: String or function which returns password for decrypting the mTLS private key.
|
||||
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
|
||||
Be aware that supplying both certificate/key files and an SSL Context is not
|
||||
possible.
|
||||
:key str proxy: Set this to configure the http proxy to be used (ex. http://localhost:3128)
|
||||
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
|
||||
authentication.
|
||||
:key int connection_pool_maxsize: Number of connections to save that can be reused by urllib3.
|
||||
Defaults to "multiprocessing.cpu_count() * 5".
|
||||
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
|
||||
except batching writes. By default, no retry strategy is used.
|
||||
:key bool auth_basic: Set this to true to enable basic authentication when talking to an InfluxDB 1.8.x instance that
|
||||
does not have authentication enabled but is protected by a reverse proxy with basic authentication.
|
||||
(defaults to false, don't set to true when talking to InfluxDB 2)
|
||||
:key str username: ``username`` to authenticate via username and password credentials to the InfluxDB 2.x
|
||||
:key str password: ``password`` to authenticate via username and password credentials to the InfluxDB 2.x
|
||||
:key list[str] profilers: list of enabled Flux profilers
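
A minimal connection sketch (the URL, token and org below are placeholders for your own values):

.. code-block:: python

    from influxdb_client import InfluxDBClient

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        print(client.ping())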
|
||||
"""
|
||||
super().__init__(url=url, token=token, debug=debug, timeout=timeout, enable_gzip=enable_gzip, org=org,
|
||||
default_tags=default_tags, http_client_logger="urllib3", **kwargs)
|
||||
|
||||
from .._sync.api_client import ApiClient
|
||||
self.api_client = ApiClient(configuration=self.conf, header_name=self.auth_header_name,
|
||||
header_value=self.auth_header_value, retries=self.retries)
|
||||
|
||||
def __enter__(self):
|
||||
"""
|
||||
Enter the runtime context related to this object.
|
||||
|
||||
It will bind this method’s return value to the target(s)
|
||||
specified in the `as` clause of the statement.
|
||||
|
||||
:return: self instance
|
||||
"""
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
"""Exit the runtime context related to this object and close the client."""
|
||||
self.close()
|
||||
|
||||
@classmethod
|
||||
def from_config_file(cls, config_file: str = "config.ini", debug=None, enable_gzip=False, **kwargs):
|
||||
"""
|
||||
Configure the client via a configuration file. The configuration has to be under the 'influx2' section.
|
||||
|
||||
:param config_file: Path to configuration file
|
||||
:param debug: Enable verbose logging of http requests
|
||||
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
|
||||
support Gzip compression.
|
||||
:key config_name: Name of the configuration section of the configuration file
|
||||
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
|
||||
authentication.
|
||||
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
|
||||
except batching writes. By default, no retry strategy is used.
|
||||
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
|
||||
Be aware that supplying both certificate/key files and an SSL Context is not
|
||||
possible.
|
||||
|
||||
The supported formats:
|
||||
- https://docs.python.org/3/library/configparser.html
|
||||
- https://toml.io/en/
|
||||
- https://www.json.org/json-en.html
|
||||
|
||||
Configuration options:
|
||||
- url
|
||||
- org
|
||||
- token
|
||||
- timeout,
|
||||
- verify_ssl
|
||||
- ssl_ca_cert
|
||||
- cert_file
|
||||
- cert_key_file
|
||||
- cert_key_password
|
||||
- connection_pool_maxsize
|
||||
- auth_basic
|
||||
- profilers
|
||||
- proxy
|
||||
|
||||
|
||||
config.ini example::
|
||||
|
||||
[influx2]
|
||||
url=http://localhost:8086
|
||||
org=my-org
|
||||
token=my-token
|
||||
timeout=6000
|
||||
connection_pool_maxsize=25
|
||||
auth_basic=false
|
||||
profilers=query,operator
|
||||
proxy=http://proxy.domain.org:8080
|
||||
|
||||
[tags]
|
||||
id = 132-987-655
|
||||
customer = California Miner
|
||||
data_center = ${env.data_center}
|
||||
|
||||
config.toml example::
|
||||
|
||||
[influx2]
|
||||
url = "http://localhost:8086"
|
||||
token = "my-token"
|
||||
org = "my-org"
|
||||
timeout = 6000
|
||||
connection_pool_maxsize = 25
|
||||
auth_basic = false
|
||||
profilers="query, operator"
|
||||
proxy = "http://proxy.domain.org:8080"
|
||||
|
||||
[tags]
|
||||
id = "132-987-655"
|
||||
customer = "California Miner"
|
||||
data_center = "${env.data_center}"
|
||||
|
||||
config.json example::
|
||||
|
||||
{
|
||||
"url": "http://localhost:8086",
|
||||
"token": "my-token",
|
||||
"org": "my-org",
|
||||
"active": true,
|
||||
"timeout": 6000,
|
||||
"connection_pool_maxsize": 55,
|
||||
"auth_basic": false,
|
||||
"profilers": "query, operator",
|
||||
"tags": {
|
||||
"id": "132-987-655",
|
||||
"customer": "California Miner",
|
||||
"data_center": "${env.data_center}"
|
||||
}
|
||||
}
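
A loading sketch (assuming a ``config.ini`` like the one above exists in the working directory):

.. code-block:: python

    from influxdb_client import InfluxDBClient

    with InfluxDBClient.from_config_file("config.ini") as client:
        print(client.ping())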
|
||||
|
||||
"""
|
||||
return InfluxDBClient._from_config_file(config_file=config_file, debug=debug, enable_gzip=enable_gzip, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def from_env_properties(cls, debug=None, enable_gzip=False, **kwargs):
|
||||
"""
|
||||
Configure client via environment properties.
|
||||
|
||||
:param debug: Enable verbose logging of http requests
|
||||
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
|
||||
support Gzip compression.
|
||||
:key str proxy: Set this to configure the http proxy to be used (ex. http://localhost:3128)
|
||||
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
|
||||
authentication.
|
||||
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
|
||||
except batching writes. By default, no retry strategy is used.
|
||||
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
|
||||
Be aware that supplying both certificate/key files and an SSL Context is not
|
||||
possible.
|
||||
|
||||
Supported environment properties:
|
||||
- INFLUXDB_V2_URL
|
||||
- INFLUXDB_V2_ORG
|
||||
- INFLUXDB_V2_TOKEN
|
||||
- INFLUXDB_V2_TIMEOUT
|
||||
- INFLUXDB_V2_VERIFY_SSL
|
||||
- INFLUXDB_V2_SSL_CA_CERT
|
||||
- INFLUXDB_V2_CERT_FILE
|
||||
- INFLUXDB_V2_CERT_KEY_FILE
|
||||
- INFLUXDB_V2_CERT_KEY_PASSWORD
|
||||
- INFLUXDB_V2_CONNECTION_POOL_MAXSIZE
|
||||
- INFLUXDB_V2_AUTH_BASIC
|
||||
- INFLUXDB_V2_PROFILERS
|
||||
- INFLUXDB_V2_TAG
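
A usage sketch (assuming the environment variables above are already exported):

.. code-block:: python

    from influxdb_client import InfluxDBClient

    with InfluxDBClient.from_env_properties() as client:
        print(client.ping())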
|
||||
"""
|
||||
return InfluxDBClient._from_env_properties(debug=debug, enable_gzip=enable_gzip, **kwargs)
|
||||
|
||||
def write_api(self, write_options=WriteOptions(), point_settings=PointSettings(), **kwargs) -> WriteApi:
|
||||
"""
|
||||
Create Write API instance.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
from influxdb_client.client.write_api import SYNCHRONOUS
|
||||
|
||||
|
||||
# Initialize SYNCHRONOUS instance of WriteApi
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
write_api = client.write_api(write_options=SYNCHRONOUS)
|
||||
|
||||
If you would like to use **background batching**, you have to configure the client like this:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
|
||||
# Initialize background batching instance of WriteApi
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
with client.write_api() as write_api:
|
||||
pass
|
||||
|
||||
It is also possible to use callbacks to be notified about the state of background batches:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
from influxdb_client.client.exceptions import InfluxDBError
|
||||
|
||||
|
||||
class BatchingCallback(object):
|
||||
|
||||
def success(self, conf: (str, str, str), data: str):
|
||||
print(f"Written batch: {conf}, data: {data}")
|
||||
|
||||
def error(self, conf: (str, str, str), data: str, exception: InfluxDBError):
|
||||
print(f"Cannot write batch: {conf}, data: {data} due: {exception}")
|
||||
|
||||
def retry(self, conf: (str, str, str), data: str, exception: InfluxDBError):
|
||||
print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
|
||||
|
||||
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
callback = BatchingCallback()
|
||||
with client.write_api(success_callback=callback.success,
|
||||
error_callback=callback.error,
|
||||
retry_callback=callback.retry) as write_api:
|
||||
pass
|
||||
|
||||
:param write_options: Write API configuration
|
||||
:param point_settings: settings to store default tags
|
||||
:key success_callback: The callable ``callback`` to run after a batch has been successfully written.
|
||||
|
||||
The callable must accept two arguments:
|
||||
- `Tuple`: ``(bucket, organization, precision)``
|
||||
- `str`: written data
|
||||
|
||||
**[batching mode]**
|
||||
|
||||
:key error_callback: The callable ``callback`` to run after a batch has failed to be written.
|
||||
|
||||
The callable must accept three arguments:
|
||||
- `Tuple`: ``(bucket, organization, precision)``
|
||||
- `str`: written data
|
||||
- `Exception`: the error that occurred
|
||||
|
||||
**[batching mode]**
|
||||
:key retry_callback: The callable ``callback`` to run after a retryable error has occurred.
|
||||
|
||||
The callable must accept three arguments:
|
||||
- `Tuple`: ``(bucket, organization, precision)``
|
||||
- `str`: written data
|
||||
- `Exception`: a retryable error
|
||||
|
||||
**[batching mode]**
|
||||
:return: write api instance
|
||||
"""
|
||||
return WriteApi(influxdb_client=self, write_options=write_options, point_settings=point_settings, **kwargs)
|
||||
|
||||
def query_api(self, query_options: QueryOptions = QueryOptions()) -> QueryApi:
|
||||
"""
|
||||
Create a Query API instance.
|
||||
|
||||
:param query_options: optional query api configuration
|
||||
:return: Query api instance
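
Example (a minimal sketch; the URL, token, org and bucket are placeholders):

.. code-block:: python

    from influxdb_client import InfluxDBClient

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        query_api = client.query_api()
        tables = query_api.query('from(bucket:"my-bucket") |> range(start: -10m)')
        for row in tables.to_values(columns=['_time', '_value']):
            print(row)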
|
||||
"""
|
||||
return QueryApi(self, query_options)
|
||||
|
||||
def invokable_scripts_api(self) -> InvokableScriptsApi:
|
||||
"""
|
||||
Create an InvokableScripts API instance.
|
||||
|
||||
:return: InvokableScripts API instance
|
||||
"""
|
||||
return InvokableScriptsApi(self)
|
||||
|
||||
def close(self):
|
||||
"""Shutdown the client."""
|
||||
self.__del__()
|
||||
|
||||
def __del__(self):
|
||||
"""Shutdown the client."""
|
||||
if self.api_client:
|
||||
self.api_client.__del__()
|
||||
self.api_client = None
|
||||
|
||||
def buckets_api(self) -> BucketsApi:
|
||||
"""
|
||||
Create the Bucket API instance.
|
||||
|
||||
:return: buckets api
|
||||
"""
|
||||
return BucketsApi(self)
|
||||
|
||||
def authorizations_api(self) -> AuthorizationsApi:
|
||||
"""
|
||||
Create the Authorizations API instance.
|
||||
|
||||
:return: authorizations api
|
||||
"""
|
||||
return AuthorizationsApi(self)
|
||||
|
||||
def users_api(self) -> UsersApi:
|
||||
"""
|
||||
Create the Users API instance.
|
||||
|
||||
:return: users api
|
||||
"""
|
||||
return UsersApi(self)
|
||||
|
||||
def organizations_api(self) -> OrganizationsApi:
|
||||
"""
|
||||
Create the Organizations API instance.
|
||||
|
||||
:return: organizations api
|
||||
"""
|
||||
return OrganizationsApi(self)
|
||||
|
||||
def tasks_api(self) -> TasksApi:
|
||||
"""
|
||||
Create the Tasks API instance.
|
||||
|
||||
:return: tasks api
|
||||
"""
|
||||
return TasksApi(self)
|
||||
|
||||
def labels_api(self) -> LabelsApi:
|
||||
"""
|
||||
Create the Labels API instance.
|
||||
|
||||
:return: labels api
|
||||
"""
|
||||
return LabelsApi(self)
|
||||
|
||||
def health(self) -> HealthCheck:
|
||||
"""
|
||||
Get the health of an instance.
|
||||
|
||||
:return: HealthCheck
|
||||
"""
|
||||
warnings.warn("This method is deprecated. Call 'ping()' instead.", DeprecationWarning)
|
||||
health_service = HealthService(self.api_client)
|
||||
|
||||
try:
|
||||
health = health_service.get_health()
|
||||
return health
|
||||
except Exception as e:
|
||||
return HealthCheck(name="influxdb", message=str(e), status="fail")
|
||||
|
||||
def ping(self) -> bool:
|
||||
"""
|
||||
Return the status of InfluxDB instance.
|
||||
|
||||
:return: The status of InfluxDB.
|
||||
"""
|
||||
ping_service = PingService(self.api_client)
|
||||
|
||||
try:
|
||||
ping_service.get_ping()
|
||||
return True
|
||||
except Exception as ex:
|
||||
logger.debug("Unexpected error during /ping: %s", ex)
|
||||
return False
|
||||
|
||||
def version(self) -> str:
|
||||
"""
|
||||
Return the version of the connected InfluxDB Server.
|
||||
|
||||
:return: The version of InfluxDB.
|
||||
"""
|
||||
ping_service = PingService(self.api_client)
|
||||
|
||||
response = ping_service.get_ping_with_http_info(_return_http_data_only=False)
|
||||
|
||||
return ping_service.response_header(response)
|
||||
|
||||
def build(self) -> str:
|
||||
"""
|
||||
Return the build type of the connected InfluxDB Server.
|
||||
|
||||
:return: The type of InfluxDB build.
|
||||
"""
|
||||
ping_service = PingService(self.api_client)
|
||||
|
||||
return ping_service.build_type()
|
||||
|
||||
def ready(self) -> Ready:
|
||||
"""
|
||||
Get the readiness of the InfluxDB 2.0 instance.
|
||||
|
||||
:return: Ready
|
||||
"""
|
||||
ready_service = ReadyService(self.api_client)
|
||||
return ready_service.get_ready()
|
||||
|
||||
def delete_api(self) -> DeleteApi:
|
||||
"""
|
||||
Get the delete metrics API instance.
|
||||
|
||||
:return: delete api
|
||||
"""
|
||||
return DeleteApi(self)
|
||||
@@ -0,0 +1,301 @@
|
||||
"""InfluxDBClientAsync is client for API defined in https://github.com/influxdata/openapi/blob/master/contracts/oss.yml.""" # noqa: E501
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from influxdb_client import PingService
|
||||
from influxdb_client.client._base import _BaseClient
|
||||
from influxdb_client.client.delete_api_async import DeleteApiAsync
|
||||
from influxdb_client.client.query_api import QueryOptions
|
||||
from influxdb_client.client.query_api_async import QueryApiAsync
|
||||
from influxdb_client.client.write_api import PointSettings
|
||||
from influxdb_client.client.write_api_async import WriteApiAsync
|
||||
|
||||
logger = logging.getLogger('influxdb_client.client.influxdb_client_async')
|
||||
|
||||
|
||||
class InfluxDBClientAsync(_BaseClient):
|
||||
"""InfluxDBClientAsync is client for InfluxDB v2."""
|
||||
|
||||
def __init__(self, url, token: str = None, org: str = None, debug=None, timeout=10_000, enable_gzip=False,
|
||||
**kwargs) -> None:
|
||||
"""
|
||||
Initialize defaults.
|
||||
|
||||
:param url: InfluxDB server API url (ex. http://localhost:8086).
|
||||
:param token: ``token`` to authenticate to the InfluxDB 2.x
|
||||
:param org: organization name (used as a default in Query, Write and Delete API)
|
||||
:param debug: enable verbose logging of http requests
|
||||
:param timeout: The maximal number of milliseconds for the whole HTTP request including
|
||||
connection establishment, request sending and response reading.
|
||||
It can also be a :class:`~aiohttp.ClientTimeout` which is directly pass to ``aiohttp``.
|
||||
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
|
||||
support Gzip compression.
|
||||
:key bool verify_ssl: Set this to false to skip verifying SSL certificate when calling API from https server.
|
||||
:key str ssl_ca_cert: Set this to customize the certificate file to verify the peer.
|
||||
:key str cert_file: Path to the certificate that will be used for mTLS authentication.
|
||||
:key str cert_key_file: Path to the file containing the private key for the mTLS certificate.
|
||||
:key str cert_key_password: String or function which returns password for decrypting the mTLS private key.
|
||||
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
|
||||
Be aware that supplying both certificate/key files and an SSL Context is not
|
||||
possible.
|
||||
:key str proxy: Set this to configure the http proxy to be used (ex. http://localhost:3128)
|
||||
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
|
||||
authentication.
|
||||
:key int connection_pool_maxsize: The total number of simultaneous connections.
|
||||
Defaults to "multiprocessing.cpu_count() * 5".
|
||||
:key bool auth_basic: Set this to true to enable basic authentication when talking to an InfluxDB 1.8.x instance that
|
||||
does not have authentication enabled but is protected by a reverse proxy with basic authentication.
|
||||
(defaults to false, don't set to true when talking to InfluxDB 2)
|
||||
:key str username: ``username`` to authenticate via username and password credentials to the InfluxDB 2.x
|
||||
:key str password: ``password`` to authenticate via username and password credentials to the InfluxDB 2.x
|
||||
:key bool allow_redirects: If set to ``False``, do not follow HTTP redirects. ``True`` by default.
|
||||
:key int max_redirects: Maximum number of HTTP redirects to follow. ``10`` by default.
|
||||
:key dict client_session_kwargs: Additional configuration arguments for :class:`~aiohttp.ClientSession`
|
||||
:key type client_session_type: Type of aiohttp client to use. Useful for third party wrappers like
|
||||
``aiohttp-retry``. :class:`~aiohttp.ClientSession` by default.
|
||||
:key list[str] profilers: list of enabled Flux profilers
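
A minimal asynchronous sketch (URL, token and org are placeholders; the client must be created inside a coroutine):

.. code-block:: python

    import asyncio

    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync


    async def main():
        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
            print(await client.ping())

    asyncio.run(main())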
|
||||
"""
|
||||
super().__init__(url=url, token=token, org=org, debug=debug, timeout=timeout, enable_gzip=enable_gzip,
|
||||
http_client_logger="aiohttp.client", **kwargs)
|
||||
|
||||
# compatibility with Python 3.6
|
||||
if sys.version_info[:2] >= (3, 7):
|
||||
from asyncio import get_running_loop
|
||||
else:
|
||||
from asyncio import _get_running_loop as get_running_loop
|
||||
|
||||
# check present asynchronous context
|
||||
try:
|
||||
loop = get_running_loop()
|
||||
# compatibility with Python 3.6
|
||||
if loop is None:
|
||||
raise RuntimeError('no running event loop')
|
||||
except RuntimeError:
|
||||
from influxdb_client.client.exceptions import InfluxDBError
|
||||
message = "The async client should be initialised inside async coroutine " \
|
||||
"otherwise there can be unexpected behaviour."
|
||||
raise InfluxDBError(response=None, message=message)
|
||||
|
||||
from .._async.api_client import ApiClientAsync
|
||||
self.api_client = ApiClientAsync(configuration=self.conf, header_name=self.auth_header_name,
|
||||
header_value=self.auth_header_value, **kwargs)
|
||||
|
||||
async def __aenter__(self) -> 'InfluxDBClientAsync':
|
||||
"""
|
||||
Enter the runtime context related to this object.
|
||||
|
||||
:return: self instance
|
||||
"""
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc, tb) -> None:
|
||||
"""Shutdown the client."""
|
||||
await self.close()
|
||||
|
||||
async def close(self):
|
||||
"""Shutdown the client."""
|
||||
if self.api_client:
|
||||
await self.api_client.close()
|
||||
self.api_client = None
|
||||
|
||||
@classmethod
|
||||
def from_config_file(cls, config_file: str = "config.ini", debug=None, enable_gzip=False, **kwargs):
|
||||
"""
|
||||
Configure the client via a configuration file. The configuration has to be under the 'influx2' section.
|
||||
|
||||
:param config_file: Path to configuration file
|
||||
:param debug: Enable verbose logging of http requests
|
||||
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
|
||||
support Gzip compression.
|
||||
:key config_name: Name of the configuration section of the configuration file
|
||||
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
|
||||
authentication.
|
||||
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
|
||||
except batching writes. By default, no retry strategy is used.
|
||||
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
|
||||
Be aware that supplying both certificate/key files and an SSL Context is not
|
||||
possible.
|
||||
|
||||
The supported formats:
|
||||
- https://docs.python.org/3/library/configparser.html
|
||||
- https://toml.io/en/
|
||||
- https://www.json.org/json-en.html
|
||||
|
||||
Configuration options:
|
||||
- url
|
||||
- org
|
||||
- token
|
||||
- timeout,
|
||||
- verify_ssl
|
||||
- ssl_ca_cert
|
||||
- cert_file
|
||||
- cert_key_file
|
||||
- cert_key_password
|
||||
- connection_pool_maxsize
|
||||
- auth_basic
|
||||
- profilers
|
||||
- proxy
|
||||
|
||||
|
||||
config.ini example::
|
||||
|
||||
[influx2]
|
||||
url=http://localhost:8086
|
||||
org=my-org
|
||||
token=my-token
|
||||
timeout=6000
|
||||
connection_pool_maxsize=25
|
||||
auth_basic=false
|
||||
profilers=query,operator
|
||||
proxy=http://proxy.domain.org:8080
|
||||
|
||||
[tags]
|
||||
id = 132-987-655
|
||||
customer = California Miner
|
||||
data_center = ${env.data_center}
|
||||
|
||||
config.toml example::
|
||||
|
||||
[influx2]
|
||||
url = "http://localhost:8086"
|
||||
token = "my-token"
|
||||
org = "my-org"
|
||||
timeout = 6000
|
||||
connection_pool_maxsize = 25
|
||||
auth_basic = false
|
||||
profilers="query, operator"
|
||||
proxy = "http://proxy.domain.org:8080"
|
||||
|
||||
[tags]
|
||||
id = "132-987-655"
|
||||
customer = "California Miner"
|
||||
data_center = "${env.data_center}"
|
||||
|
||||
config.json example::
|
||||
|
||||
{
|
||||
"url": "http://localhost:8086",
|
||||
"token": "my-token",
|
||||
"org": "my-org",
|
||||
"active": true,
|
||||
"timeout": 6000,
|
||||
"connection_pool_maxsize": 55,
|
||||
"auth_basic": false,
|
||||
"profilers": "query, operator",
|
||||
"tags": {
|
||||
"id": "132-987-655",
|
||||
"customer": "California Miner",
|
||||
"data_center": "${env.data_center}"
|
||||
}
|
||||
}
|
||||
|
||||
"""
|
||||
return InfluxDBClientAsync._from_config_file(config_file=config_file, debug=debug,
|
||||
enable_gzip=enable_gzip, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def from_env_properties(cls, debug=None, enable_gzip=False, **kwargs):
|
||||
"""
|
||||
Configure client via environment properties.
|
||||
|
||||
:param debug: Enable verbose logging of http requests
|
||||
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
|
||||
support Gzip compression.
|
||||
:key str proxy: Set this to configure the http proxy to be used (ex. http://localhost:3128)
|
||||
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
|
||||
authentication.
|
||||
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
|
||||
except batching writes. By default, no retry strategy is used.
|
||||
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
|
||||
Be aware that supplying both certificate/key files and an SSL Context is not
|
||||
possible.
|
||||
|
||||
|
||||
Supported environment properties:
|
||||
- INFLUXDB_V2_URL
|
||||
- INFLUXDB_V2_ORG
|
||||
- INFLUXDB_V2_TOKEN
|
||||
- INFLUXDB_V2_TIMEOUT
|
||||
- INFLUXDB_V2_VERIFY_SSL
|
||||
- INFLUXDB_V2_SSL_CA_CERT
|
||||
- INFLUXDB_V2_CERT_FILE
|
||||
- INFLUXDB_V2_CERT_KEY_FILE
|
||||
- INFLUXDB_V2_CERT_KEY_PASSWORD
|
||||
- INFLUXDB_V2_CONNECTION_POOL_MAXSIZE
|
||||
- INFLUXDB_V2_AUTH_BASIC
|
||||
- INFLUXDB_V2_PROFILERS
|
||||
- INFLUXDB_V2_TAG
|
||||
"""
|
||||
return InfluxDBClientAsync._from_env_properties(debug=debug, enable_gzip=enable_gzip, **kwargs)
|
||||
|
||||
async def ping(self) -> bool:
|
||||
"""
|
||||
Return the status of InfluxDB instance.
|
||||
|
||||
:return: The status of InfluxDB.
|
||||
"""
|
||||
ping_service = PingService(self.api_client)
|
||||
|
||||
try:
|
||||
await ping_service.get_ping_async()
|
||||
return True
|
||||
except Exception as ex:
|
||||
logger.debug("Unexpected error during /ping: %s", ex)
|
||||
raise ex
|
||||
|
||||
async def version(self) -> str:
|
||||
"""
|
||||
Return the version of the connected InfluxDB Server.
|
||||
|
||||
:return: The version of InfluxDB.
|
||||
"""
|
||||
ping_service = PingService(self.api_client)
|
||||
|
||||
response = await ping_service.get_ping_async(_return_http_data_only=False)
|
||||
return ping_service.response_header(response)
|
||||
|
||||
async def build(self) -> str:
|
||||
"""
|
||||
Return the build type of the connected InfluxDB Server.
|
||||
|
||||
:return: The type of InfluxDB build.
|
||||
"""
|
||||
ping_service = PingService(self.api_client)
|
||||
|
||||
return await ping_service.build_type_async()
|
||||
|
||||
def query_api(self, query_options: QueryOptions = QueryOptions()) -> QueryApiAsync:
|
||||
"""
|
||||
Create an asynchronous Query API instance.
|
||||
|
||||
:param query_options: optional query api configuration
|
||||
:return: Query api instance
|
||||
"""
|
||||
return QueryApiAsync(self, query_options)
|
||||
|
||||
def write_api(self, point_settings=PointSettings()) -> WriteApiAsync:
|
||||
"""
|
||||
Create an asynchronous Write API instance.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
|
||||
|
||||
|
||||
# Initialize async/await instance of Write API
|
||||
async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
write_api = client.write_api()
|
||||
|
||||
:param point_settings: settings to store default tags
|
||||
:return: write api instance
|
||||
"""
|
||||
return WriteApiAsync(influxdb_client=self, point_settings=point_settings)
|
||||
|
||||
def delete_api(self) -> DeleteApiAsync:
|
||||
"""
|
||||
Get the asynchronous delete metrics API instance.
|
||||
|
||||
:return: delete api
|
||||
"""
|
||||
return DeleteApiAsync(self)
|
||||
@@ -0,0 +1,293 @@
|
||||
"""
|
||||
Use API invokable scripts to create custom InfluxDB API endpoints that query, process, and shape data.
|
||||
|
||||
API invokable scripts let you assign scripts to API endpoints and then execute them as standard REST operations
|
||||
in InfluxDB Cloud.
|
||||
"""
|
||||
|
||||
from typing import List, Iterator, Generator, Any
|
||||
|
||||
from influxdb_client import Script, InvokableScriptsService, ScriptCreateRequest, ScriptUpdateRequest, \
|
||||
ScriptInvocationParams
|
||||
from influxdb_client.client._base import _BaseQueryApi
|
||||
from influxdb_client.client.flux_csv_parser import FluxResponseMetadataMode
|
||||
from influxdb_client.client.flux_table import FluxRecord, TableList, CSVIterator
|
||||
|
||||
|
||||
class InvokableScriptsApi(_BaseQueryApi):
|
||||
"""Use API invokable scripts to create custom InfluxDB API endpoints that query, process, and shape data."""
|
||||
|
||||
def __init__(self, influxdb_client):
|
||||
"""Initialize defaults."""
|
||||
self._influxdb_client = influxdb_client
|
||||
self._invokable_scripts_service = InvokableScriptsService(influxdb_client.api_client)
|
||||
|
||||
def create_script(self, create_request: ScriptCreateRequest) -> Script:
|
||||
"""Create a script.
|
||||
|
||||
:param ScriptCreateRequest create_request: The script to create. (required)
|
||||
:return: The created script.
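
A creation sketch (the script name, description and Flux body below are illustrative; ``language="flux"`` is assumed to be the accepted language value):

.. code-block:: python

    from influxdb_client import InfluxDBClient, ScriptCreateRequest

    with InfluxDBClient(url="https://us-west-2-1.aws.cloud2.influxdata.com", token="my-token", org="my-org") as client:
        create_request = ScriptCreateRequest(name="my-script",
                                             description="Example script",
                                             language="flux",
                                             script='from(bucket: "my-bucket") |> range(start: -5m)')
        script = client.invokable_scripts_api().create_script(create_request=create_request)
        print(script.id)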
|
||||
"""
|
||||
return self._invokable_scripts_service.post_scripts(script_create_request=create_request)
|
||||
|
||||
def update_script(self, script_id: str, update_request: ScriptUpdateRequest) -> Script:
|
||||
"""Update a script.
|
||||
|
||||
:param str script_id: The ID of the script to update. (required)
|
||||
:param ScriptUpdateRequest update_request: Script updates to apply (required)
|
||||
:return: The updated script.
|
||||
"""
|
||||
return self._invokable_scripts_service.patch_scripts_id(script_id=script_id,
|
||||
script_update_request=update_request)
|
||||
|
||||
def delete_script(self, script_id: str) -> None:
|
||||
"""Delete a script.
|
||||
|
||||
:param str script_id: The ID of the script to delete. (required)
|
||||
:return: None
|
||||
"""
|
||||
self._invokable_scripts_service.delete_scripts_id(script_id=script_id)
|
||||
|
||||
def find_scripts(self, **kwargs):
|
||||
"""List scripts.
|
||||
|
||||
:key int limit: The number of scripts to return.
|
||||
:key int offset: The offset for pagination.
|
||||
:return: List of scripts.
|
||||
:rtype: list[Script]
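
A listing sketch (URL, token and org are placeholders):

.. code-block:: python

    from influxdb_client import InfluxDBClient

    with InfluxDBClient(url="https://us-west-2-1.aws.cloud2.influxdata.com", token="my-token", org="my-org") as client:
        scripts = client.invokable_scripts_api().find_scripts(limit=10)
        for script in scripts:
            print(script.name)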
|
||||
"""
|
||||
return self._invokable_scripts_service.get_scripts(**kwargs).scripts
|
||||
|
||||
def invoke_script(self, script_id: str, params: dict = None) -> TableList:
|
||||
"""
|
||||
Synchronously invoke a script and return the result as a TableList.
|
||||
|
||||
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
|
||||
|
||||
:param str script_id: The ID of the script to invoke. (required)
|
||||
:param params: bind parameters
|
||||
:return: :class:`~influxdb_client.client.flux_table.FluxTable` list wrapped into
|
||||
:class:`~influxdb_client.client.flux_table.TableList`
|
||||
:rtype: TableList
|
||||
|
||||
Serialize the query results to a flattened list of values via :func:`~influxdb_client.client.flux_table.TableList.to_values`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
|
||||
with InfluxDBClient(url="https://us-west-2-1.aws.cloud2.influxdata.com", token="my-token", org="my-org") as client:
|
||||
|
||||
# Query: using Table structure
|
||||
tables = client.invokable_scripts_api().invoke_script(script_id="script-id")
|
||||
|
||||
# Serialize to values
|
||||
output = tables.to_values(columns=['location', '_time', '_value'])
|
||||
print(output)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[
|
||||
['New York', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 24.3],
|
||||
['Prague', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 25.3],
|
||||
...
|
||||
]
|
||||
|
||||
Serialize the query results to JSON via :func:`~influxdb_client.client.flux_table.TableList.to_json`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
|
||||
with InfluxDBClient(url="https://us-west-2-1.aws.cloud2.influxdata.com", token="my-token", org="my-org") as client:
|
||||
|
||||
# Query: using Table structure
|
||||
tables = client.invokable_scripts_api().invoke_script(script_id="script-id")
|
||||
|
||||
# Serialize to JSON
|
||||
output = tables.to_json(indent=5)
|
||||
print(output)
|
||||
|
||||
.. code-block:: javascript
|
||||
|
||||
[
|
||||
{
|
||||
"_measurement": "mem",
|
||||
"_start": "2021-06-23T06:50:11.897825+00:00",
|
||||
"_stop": "2021-06-25T06:50:11.897825+00:00",
|
||||
"_time": "2020-02-27T16:20:00.897825+00:00",
|
||||
"region": "north",
|
||||
"_field": "usage",
|
||||
"_value": 15
|
||||
},
|
||||
{
|
||||
"_measurement": "mem",
|
||||
"_start": "2021-06-23T06:50:11.897825+00:00",
|
||||
"_stop": "2021-06-25T06:50:11.897825+00:00",
|
||||
"_time": "2020-02-27T16:20:01.897825+00:00",
|
||||
"region": "west",
|
||||
"_field": "usage",
|
||||
"_value": 10
|
||||
},
|
||||
...
|
||||
]
|
||||
""" # noqa: E501
|
||||
response = self._invokable_scripts_service \
|
||||
.post_scripts_id_invoke(script_id=script_id,
|
||||
script_invocation_params=ScriptInvocationParams(params=params),
|
||||
async_req=False,
|
||||
_preload_content=False,
|
||||
_return_http_data_only=False)
|
||||
return self._to_tables(response, query_options=None, response_metadata_mode=FluxResponseMetadataMode.only_names)
|
||||
|
||||
def invoke_script_stream(self, script_id: str, params: dict = None) -> Generator['FluxRecord', Any, None]:
|
||||
"""
|
||||
Synchronously invoke a script and return the result as a Generator['FluxRecord'].
|
||||
|
||||
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
|
||||
|
||||
:param str script_id: The ID of the script to invoke. (required)
|
||||
:param params: bind parameters
|
||||
:return: Stream of FluxRecord.
|
||||
:rtype: Generator['FluxRecord']
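
A streaming sketch (the script id, URL, token and org are placeholders):

.. code-block:: python

    from influxdb_client import InfluxDBClient

    with InfluxDBClient(url="https://us-west-2-1.aws.cloud2.influxdata.com", token="my-token", org="my-org") as client:
        records = client.invokable_scripts_api().invoke_script_stream(script_id="script-id")
        for record in records:
            print(record.get_value())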
|
||||
"""
|
||||
response = self._invokable_scripts_service \
|
||||
.post_scripts_id_invoke(script_id=script_id,
|
||||
script_invocation_params=ScriptInvocationParams(params=params),
|
||||
async_req=False,
|
||||
_preload_content=False,
|
||||
_return_http_data_only=False)
|
||||
|
||||
return self._to_flux_record_stream(response, query_options=None,
|
||||
response_metadata_mode=FluxResponseMetadataMode.only_names)
|
||||
|
||||
def invoke_script_data_frame(self, script_id: str, params: dict = None, data_frame_index: List[str] = None):
|
||||
"""
|
||||
Synchronously invoke a script and return a Pandas DataFrame.
|
||||
|
||||
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
|
||||
|
||||
.. note:: If the ``script`` returns tables with differing schemas, then the client generates a :class:`~DataFrame` for each of them.
|
||||
|
||||
:param str script_id: The ID of the script to invoke. (required)
|
||||
:param List[str] data_frame_index: The list of columns that are used as DataFrame index.
|
||||
:param params: bind parameters
|
||||
:return: :class:`~DataFrame` or :class:`~List[DataFrame]`
|
||||
|
||||
.. warning:: For optimal processing of the query results, use the ``pivot()`` function, which aligns the results as a table.
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
from(bucket:"my-bucket")
|
||||
|> range(start: -5m, stop: now())
|
||||
|> filter(fn: (r) => r._measurement == "mem")
|
||||
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
|
||||
|
||||
For more info see:
|
||||
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
|
||||
""" # noqa: E501
|
||||
_generator = self.invoke_script_data_frame_stream(script_id=script_id,
|
||||
params=params,
|
||||
data_frame_index=data_frame_index)
|
||||
return self._to_data_frames(_generator)
|
||||
|
||||
def invoke_script_data_frame_stream(self, script_id: str, params: dict = None, data_frame_index: List[str] = None):
|
||||
"""
|
||||
Synchronously invoke a script and return a stream of Pandas DataFrames as a Generator['pd.DataFrame'].
|
||||
|
||||
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
|
||||
|
||||
.. note:: If the ``script`` returns tables with differing schemas, then the client generates a :class:`~DataFrame` for each of them.
|
||||
|
||||
:param str script_id: The ID of the script to invoke. (required)
|
||||
:param List[str] data_frame_index: The list of columns that are used as DataFrame index.
|
||||
:param params: bind parameters
|
||||
:return: :class:`~Generator[DataFrame]`
|
||||
|
||||
.. warning:: For optimal processing of the query results, use the ``pivot()`` function, which aligns the results as a table.
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
from(bucket:"my-bucket")
|
||||
|> range(start: -5m, stop: now())
|
||||
|> filter(fn: (r) => r._measurement == "mem")
|
||||
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
|
||||
|
||||
For more info see:
|
||||
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
|
||||
""" # noqa: E501
|
||||
response = self._invokable_scripts_service \
|
||||
.post_scripts_id_invoke(script_id=script_id,
|
||||
script_invocation_params=ScriptInvocationParams(params=params),
|
||||
async_req=False,
|
||||
_preload_content=False,
|
||||
_return_http_data_only=False)
|
||||
|
||||
return self._to_data_frame_stream(data_frame_index, response, query_options=None,
|
||||
response_metadata_mode=FluxResponseMetadataMode.only_names)
|
||||
|
||||
def invoke_script_csv(self, script_id: str, params: dict = None) -> CSVIterator:
|
||||
"""
|
||||
Synchronously invoke a script and return the result as a CSV iterator. Each iteration returns a row of the CSV file.
|
||||
|
||||
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
|
||||
|
||||
:param str script_id: The ID of the script to invoke. (required)
|
||||
:param params: bind parameters
|
||||
:return: :class:`~Iterator[List[str]]` wrapped into :class:`~influxdb_client.client.flux_table.CSVIterator`
|
||||
:rtype: CSVIterator
|
||||
|
||||
Serialize the query results to a flattened list of values via :func:`~influxdb_client.client.flux_table.CSVIterator.to_values`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
|
||||
# Query: using CSV iterator
|
||||
csv_iterator = client.invokable_scripts_api().invoke_script_csv(script_id="script-id")
|
||||
|
||||
# Serialize to values
|
||||
output = csv_iterator.to_values()
|
||||
print(output)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[
|
||||
['', 'result', 'table', '_start', '_stop', '_time', '_value', '_field', '_measurement', 'location']
|
||||
['', '', '0', '2022-06-16', '2022-06-16', '2022-06-16', '24.3', 'temperature', 'my_measurement', 'New York']
|
||||
['', '', '1', '2022-06-16', '2022-06-16', '2022-06-16', '25.3', 'temperature', 'my_measurement', 'Prague']
|
||||
...
|
||||
]
|
||||
|
||||
""" # noqa: E501
|
||||
response = self._invokable_scripts_service \
|
||||
.post_scripts_id_invoke(script_id=script_id,
|
||||
script_invocation_params=ScriptInvocationParams(params=params),
|
||||
async_req=False,
|
||||
_preload_content=False)
|
||||
|
||||
return self._to_csv(response)
|
||||
|
||||
def invoke_script_raw(self, script_id: str, params: dict = None) -> Iterator[List[str]]:
|
||||
"""
|
||||
Synchronously invoke a script and return the raw unprocessed result as a str.
|
||||
|
||||
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
|
||||
|
||||
:param str script_id: The ID of the script to invoke. (required)
|
||||
:param params: bind parameters
|
||||
:return: Result as a str.
|
||||
"""
|
||||
response = self._invokable_scripts_service \
|
||||
.post_scripts_id_invoke(script_id=script_id,
|
||||
script_invocation_params=ScriptInvocationParams(params=params),
|
||||
async_req=False,
|
||||
_preload_content=True)
|
||||
|
||||
return response
|
||||
@@ -0,0 +1,96 @@
|
||||
"""Labels are a way to add visual metadata to dashboards, tasks, and other items in the InfluxDB UI."""
|
||||
|
||||
from typing import List, Dict, Union
|
||||
|
||||
from influxdb_client import LabelsService, LabelCreateRequest, Label, LabelUpdate
|
||||
|
||||
|
||||
class LabelsApi(object):
|
||||
"""Implementation for '/api/v2/labels' endpoint."""
|
||||
|
||||
def __init__(self, influxdb_client):
|
||||
"""Initialize defaults."""
|
||||
self._influxdb_client = influxdb_client
|
||||
self._service = LabelsService(influxdb_client.api_client)
|
||||
|
||||
def create_label(self, name: str, org_id: str, properties: Dict[str, str] = None) -> Label:
|
||||
"""
|
||||
Create a new label.
|
||||
|
||||
:param name: label name
|
||||
:param org_id: organization id
|
||||
:param properties: optional label properties
|
||||
:return: created label
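
A creation sketch (the URL, token, org id and properties are illustrative):

.. code-block:: python

    from influxdb_client import InfluxDBClient

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        label = client.labels_api().create_label(name="my-label",
                                                 org_id="my-org-id",
                                                 properties={"color": "ffb3b3"})
        print(label.id)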
|
||||
"""
|
||||
label_request = LabelCreateRequest(org_id=org_id, name=name, properties=properties)
|
||||
return self._service.post_labels(label_create_request=label_request).label
|
||||
|
||||
def update_label(self, label: Label):
|
||||
"""
|
||||
Update an existing label name and properties.
|
||||
|
||||
:param label: label
|
||||
:return: the updated label
|
||||
"""
|
||||
label_update = LabelUpdate()
|
||||
label_update.properties = label.properties
|
||||
label_update.name = label.name
|
||||
return self._service.patch_labels_id(label_id=label.id, label_update=label_update).label
|
||||
|
||||
def delete_label(self, label: Union[str, Label]):
|
||||
"""
|
||||
Delete the label.
|
||||
|
||||
:param label: label id or Label
|
||||
"""
|
||||
label_id = None
|
||||
|
||||
if isinstance(label, str):
|
||||
label_id = label
|
||||
|
||||
if isinstance(label, Label):
|
||||
label_id = label.id
|
||||
|
||||
return self._service.delete_labels_id(label_id=label_id)
|
||||
|
||||
def clone_label(self, cloned_name: str, label: Label) -> Label:
|
||||
"""
|
||||
Create a new label as a copy of an existing label.
|
||||
|
||||
:param cloned_name: new label name
|
||||
:param label: existing label
|
||||
:return: cloned Label
|
||||
"""
|
||||
cloned_properties = None
|
||||
if label.properties is not None:
|
||||
cloned_properties = label.properties.copy()
|
||||
|
||||
return self.create_label(name=cloned_name, properties=cloned_properties, org_id=label.org_id)
|
||||
|
||||
def find_labels(self, **kwargs) -> List['Label']:
|
||||
"""
|
||||
Get all available labels.
|
||||
|
||||
:key str org_id: The organization ID.
|
||||
|
||||
:return: labels
|
||||
"""
|
||||
return self._service.get_labels(**kwargs).labels
|
||||
|
||||
def find_label_by_id(self, label_id: str):
|
||||
"""
|
||||
Retrieve the label by id.
|
||||
|
||||
:param label_id:
|
||||
:return: Label
|
||||
"""
|
||||
return self._service.get_labels_id(label_id=label_id).label
|
||||
|
||||
def find_label_by_org(self, org_id) -> List['Label']:
|
||||
"""
|
||||
Get the list of all labels for given organization.
|
||||
|
||||
:param org_id: organization id
|
||||
:return: list of labels
|
||||
"""
|
||||
return self._service.get_labels(org_id=org_id).labels
|
||||
@@ -0,0 +1,64 @@
|
||||
"""Use the influxdb_client with python native logging."""
|
||||
import logging
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
|
||||
|
||||
class InfluxLoggingHandler(logging.Handler):
|
||||
"""
|
||||
InfluxLoggingHandler instances dispatch logging events to influx.
|
||||
|
||||
There is no need to set a Formatter.
|
||||
The raw input will be passed on to the influx write api.
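
A wiring sketch (URL, token, org and bucket are placeholders; the handler is assumed to live in ``influxdb_client.client.logging_handler``):

.. code-block:: python

    import logging

    from influxdb_client.client.logging_handler import InfluxLoggingHandler

    handler = InfluxLoggingHandler(url="http://localhost:8086", token="my-token",
                                   org="my-org", bucket="my-bucket")
    logger = logging.getLogger("influx-logger")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)

    # The message is passed to the write API as-is, so use line protocol
    logger.debug('my_measurement,host=host1 level="debug"')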
|
||||
"""
|
||||
|
||||
DEFAULT_LOG_RECORD_KEYS = list(logging.makeLogRecord({}).__dict__.keys()) + ['message']
|
||||
|
||||
def __init__(self, *, url, token, org, bucket, client_args=None, write_api_args=None):
|
||||
"""
|
||||
Initialize defaults.
|
||||
|
||||
The arguments `client_args` and `write_api_args` can be dicts of kwargs.
|
||||
They are passed on to the InfluxDBClient and write_api calls respectively.
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
self.bucket = bucket
|
||||
|
||||
client_args = {} if client_args is None else client_args
|
||||
self.client = InfluxDBClient(url=url, token=token, org=org, **client_args)
|
||||
|
||||
write_api_args = {} if write_api_args is None else write_api_args
|
||||
self.write_api = self.client.write_api(**write_api_args)
|
||||
|
||||
def __del__(self):
|
||||
"""Make sure all resources are closed."""
|
||||
self.close()
|
||||
|
||||
def close(self) -> None:
|
||||
"""Close the write_api, client and logger."""
|
||||
self.write_api.close()
|
||||
self.client.close()
|
||||
super().close()
|
||||
|
||||
def emit(self, record: logging.LogRecord) -> None:
|
||||
"""Emit a record via the influxDB WriteApi."""
|
||||
try:
|
||||
message = self.format(record)
|
||||
extra = self._get_extra_values(record)
|
||||
return self.write_api.write(record=message, **extra)
|
||||
except (KeyboardInterrupt, SystemExit):
|
||||
raise
|
||||
except (Exception,):
|
||||
self.handleError(record)
|
||||
|
||||
def _get_extra_values(self, record: logging.LogRecord) -> dict:
|
||||
"""
|
||||
Extract all items from the record that were injected via extra.
|
||||
|
||||
Example: `logging.debug(msg, extra={key: value, ...})`.
|
||||
"""
|
||||
extra = {'bucket': self.bucket}
|
||||
extra.update({key: value for key, value in record.__dict__.items()
|
||||
if key not in self.DEFAULT_LOG_RECORD_KEYS})
|
||||
return extra
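# --- Usage sketch (illustrative only, not part of the logging handler module above) ---
# Attaches the handler to a standard logger; the record message must already be valid
# line protocol because it is passed unchanged to the write API. URL, token, org and
# bucket values are placeholders.
#
# import logging
#
# handler = InfluxLoggingHandler(url="http://localhost:8086", token="my-token",
#                                org="my-org", bucket="logs")
# logger = logging.getLogger("my-app")
# logger.setLevel(logging.DEBUG)
# logger.addHandler(handler)
#
# # keys passed via `extra` that are not default LogRecord attributes are forwarded to
# # write(); a 'bucket' key overrides the handler's default bucket (see _get_extra_values)
# logger.debug("app,component=auth duration=12i", extra={"bucket": "other-bucket"})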
|
||||
@@ -0,0 +1,60 @@
|
||||
"""
|
||||
An organization is a workspace for a group of users.
|
||||
|
||||
All dashboards, tasks, buckets, members, etc., belong to an organization.
|
||||
"""
|
||||
|
||||
from influxdb_client import OrganizationsService, UsersService, Organization, PatchOrganizationRequest
|
||||
|
||||
|
||||
class OrganizationsApi(object):
|
||||
"""Implementation for '/api/v2/orgs' endpoint."""
|
||||
|
||||
def __init__(self, influxdb_client):
|
||||
"""Initialize defaults."""
|
||||
self._influxdb_client = influxdb_client
|
||||
self._organizations_service = OrganizationsService(influxdb_client.api_client)
|
||||
self._users_service = UsersService(influxdb_client.api_client)
|
||||
|
||||
def me(self):
|
||||
"""Return the current authenticated user."""
|
||||
user = self._users_service.get_me()
|
||||
return user
|
||||
|
||||
def find_organization(self, org_id):
|
||||
"""Retrieve an organization."""
|
||||
return self._organizations_service.get_orgs_id(org_id=org_id)
|
||||
|
||||
def find_organizations(self, **kwargs):
|
||||
"""
|
||||
List all organizations.
|
||||
|
||||
:key int offset: Offset for pagination
|
||||
:key int limit: Limit for pagination
|
||||
:key bool descending:
|
||||
:key str org: Filter organizations to a specific organization name.
|
||||
:key str org_id: Filter organizations to a specific organization ID.
|
||||
:key str user_id: Filter organizations to a specific user ID.
|
||||
"""
|
||||
return self._organizations_service.get_orgs(**kwargs).orgs
|
||||
|
||||
def create_organization(self, name: str = None, organization: Organization = None) -> Organization:
|
||||
"""Create an organization."""
|
||||
if organization is None:
|
||||
organization = Organization(name=name)
|
||||
return self._organizations_service.post_orgs(post_organization_request=organization)
|
||||
|
||||
def update_organization(self, organization: Organization) -> Organization:
|
||||
"""Update an organization.
|
||||
|
||||
:param organization: Organization update to apply (required)
|
||||
:return: Organization
|
||||
"""
|
||||
request = PatchOrganizationRequest(name=organization.name,
|
||||
description=organization.description)
|
||||
|
||||
return self._organizations_service.patch_orgs_id(org_id=organization.id, patch_organization_request=request)
|
||||
|
||||
def delete_organization(self, org_id: str):
|
||||
"""Delete an organization."""
|
||||
return self._organizations_service.delete_orgs_id(org_id=org_id)
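# --- Usage sketch (illustrative only, not part of the OrganizationsApi module above) ---
# `InfluxDBClient.organizations_api()` is assumed to return the OrganizationsApi shown
# above; connection values and names are placeholders.
#
# from influxdb_client import InfluxDBClient
#
# with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
#     orgs_api = client.organizations_api()
#
#     org = orgs_api.create_organization(name="my-new-org")
#     org.description = "Workspace for the data team"
#     orgs_api.update_organization(organization=org)
#
#     for found in orgs_api.find_organizations(org="my-new-org"):
#         print(found.id, found.name)
#
#     orgs_api.delete_organization(org_id=org.id)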
|
||||
@@ -0,0 +1,310 @@
|
||||
"""
|
||||
Querying InfluxDB by FluxLang.
|
||||
|
||||
Flux is InfluxData’s functional data scripting language designed for querying, analyzing, and acting on data.
|
||||
"""
|
||||
|
||||
from typing import List, Generator, Any, Callable
|
||||
|
||||
from influxdb_client import Dialect
|
||||
from influxdb_client.client._base import _BaseQueryApi
|
||||
from influxdb_client.client.flux_table import FluxRecord, TableList, CSVIterator
|
||||
|
||||
|
||||
class QueryOptions(object):
|
||||
"""Query options."""
|
||||
|
||||
def __init__(self, profilers: List[str] = None, profiler_callback: Callable = None) -> None:
|
||||
"""
|
||||
Initialize query options.
|
||||
|
||||
:param profilers: list of enabled flux profilers
|
||||
:param profiler_callback: callback function return profilers (FluxRecord)
|
||||
"""
|
||||
self.profilers = profilers
|
||||
self.profiler_callback = profiler_callback
|
||||
|
||||
|
||||
class QueryApi(_BaseQueryApi):
|
||||
"""Implementation for '/api/v2/query' endpoint."""
|
||||
|
||||
def __init__(self, influxdb_client, query_options=QueryOptions()):
|
||||
"""
|
||||
Initialize query client.
|
||||
|
||||
:param influxdb_client: influxdb client
|
||||
"""
|
||||
super().__init__(influxdb_client=influxdb_client, query_options=query_options)
|
||||
|
||||
def query_csv(self, query: str, org=None, dialect: Dialect = _BaseQueryApi.default_dialect, params: dict = None) \
|
||||
-> CSVIterator:
|
||||
"""
|
||||
Execute the Flux query and return results as a CSV iterator. Each iteration returns a row of the CSV file.
|
||||
|
||||
:param query: a Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClient.org`` is used.
|
||||
:param dialect: csv dialect format
|
||||
:param params: bind parameters
|
||||
:return: :class:`~Iterator[List[str]]` wrapped into :class:`~influxdb_client.client.flux_table.CSVIterator`
|
||||
:rtype: CSVIterator
|
||||
|
||||
Serialize the query results to a flattened list of values via :func:`~influxdb_client.client.flux_table.CSVIterator.to_values`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
|
||||
# Query: using CSV iterator
|
||||
csv_iterator = client.query_api().query_csv('from(bucket:"my-bucket") |> range(start: -10m)')
|
||||
|
||||
# Serialize to values
|
||||
output = csv_iterator.to_values()
|
||||
print(output)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[
|
||||
['#datatype', 'string', 'long', 'dateTime:RFC3339', 'dateTime:RFC3339', 'dateTime:RFC3339', 'double', 'string', 'string', 'string']
|
||||
['#group', 'false', 'false', 'true', 'true', 'false', 'false', 'true', 'true', 'true']
|
||||
['#default', '_result', '', '', '', '', '', '', '', '']
|
||||
['', 'result', 'table', '_start', '_stop', '_time', '_value', '_field', '_measurement', 'location']
|
||||
['', '', '0', '2022-06-16', '2022-06-16', '2022-06-16', '24.3', 'temperature', 'my_measurement', 'New York']
|
||||
['', '', '1', '2022-06-16', '2022-06-16', '2022-06-16', '25.3', 'temperature', 'my_measurement', 'Prague']
|
||||
...
|
||||
]
|
||||
|
||||
If you would like to turn off `Annotated CSV headers <https://docs.influxdata.com/influxdb/latest/reference/syntax/annotated-csv/>`_ you can use the following code:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient, Dialect
|
||||
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
|
||||
# Query: using CSV iterator
|
||||
csv_iterator = client.query_api().query_csv('from(bucket:"my-bucket") |> range(start: -10m)',
|
||||
dialect=Dialect(header=False, annotations=[]))
|
||||
|
||||
for csv_line in csv_iterator:
|
||||
print(csv_line)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[
|
||||
['', '_result', '0', '2022-06-16', '2022-06-16', '2022-06-16', '24.3', 'temperature', 'my_measurement', 'New York']
|
||||
['', '_result', '1', '2022-06-16', '2022-06-16', '2022-06-16', '25.3', 'temperature', 'my_measurement', 'Prague']
|
||||
...
|
||||
]
|
||||
""" # noqa: E501
|
||||
org = self._org_param(org)
|
||||
response = self._query_api.post_query(org=org, query=self._create_query(query, dialect, params),
|
||||
async_req=False, _preload_content=False)
|
||||
|
||||
return self._to_csv(response)
|
||||
|
||||
def query_raw(self, query: str, org=None, dialect=_BaseQueryApi.default_dialect, params: dict = None):
|
||||
"""
|
||||
Execute a synchronous Flux query and return the raw unprocessed result as a str.
|
||||
|
||||
:param query: a Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClient.org`` is used.
|
||||
:param dialect: csv dialect format
|
||||
:param params: bind parameters
|
||||
:return: str
|
||||
"""
|
||||
org = self._org_param(org)
|
||||
result = self._query_api.post_query(org=org, query=self._create_query(query, dialect, params), async_req=False,
|
||||
_preload_content=False)
|
||||
|
||||
return result
|
||||
|
||||
def query(self, query: str, org=None, params: dict = None) -> TableList:
|
||||
"""Execute synchronous Flux query and return result as a :class:`~influxdb_client.client.flux_table.FluxTable` list.
|
||||
|
||||
:param query: the Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClient.org`` is used.
|
||||
:param params: bind parameters
|
||||
:return: :class:`~influxdb_client.client.flux_table.FluxTable` list wrapped into
|
||||
:class:`~influxdb_client.client.flux_table.TableList`
|
||||
:rtype: TableList
|
||||
|
||||
Serialize the query results to a flattened list of values via :func:`~influxdb_client.client.flux_table.TableList.to_values`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
|
||||
# Query: using Table structure
|
||||
tables = client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
|
||||
|
||||
# Serialize to values
|
||||
output = tables.to_values(columns=['location', '_time', '_value'])
|
||||
print(output)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[
|
||||
['New York', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 24.3],
|
||||
['Prague', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 25.3],
|
||||
...
|
||||
]
|
||||
|
||||
Serialize the query results to JSON via :func:`~influxdb_client.client.flux_table.TableList.to_json`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
|
||||
# Query: using Table structure
|
||||
tables = client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
|
||||
|
||||
# Serialize to JSON
|
||||
output = tables.to_json(indent=5)
|
||||
print(output)
|
||||
|
||||
.. code-block:: javascript
|
||||
|
||||
[
|
||||
{
|
||||
"_measurement": "mem",
|
||||
"_start": "2021-06-23T06:50:11.897825+00:00",
|
||||
"_stop": "2021-06-25T06:50:11.897825+00:00",
|
||||
"_time": "2020-02-27T16:20:00.897825+00:00",
|
||||
"region": "north",
|
||||
"_field": "usage",
|
||||
"_value": 15
|
||||
},
|
||||
{
|
||||
"_measurement": "mem",
|
||||
"_start": "2021-06-23T06:50:11.897825+00:00",
|
||||
"_stop": "2021-06-25T06:50:11.897825+00:00",
|
||||
"_time": "2020-02-27T16:20:01.897825+00:00",
|
||||
"region": "west",
|
||||
"_field": "usage",
|
||||
"_value": 10
|
||||
},
|
||||
...
|
||||
]
|
||||
""" # noqa: E501
|
||||
org = self._org_param(org)
|
||||
|
||||
response = self._query_api.post_query(org=org, query=self._create_query(query, self.default_dialect, params),
|
||||
async_req=False, _preload_content=False, _return_http_data_only=False)
|
||||
|
||||
return self._to_tables(response, query_options=self._get_query_options())
|
||||
|
||||
def query_stream(self, query: str, org=None, params: dict = None) -> Generator['FluxRecord', Any, None]:
|
||||
"""
|
||||
Execute synchronous Flux query and return stream of FluxRecord as a Generator['FluxRecord'].
|
||||
|
||||
:param query: the Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClient.org`` is used.
|
||||
:param params: bind parameters
|
||||
:return: Generator['FluxRecord']
|
||||
"""
|
||||
org = self._org_param(org)
|
||||
|
||||
response = self._query_api.post_query(org=org, query=self._create_query(query, self.default_dialect, params),
|
||||
async_req=False, _preload_content=False, _return_http_data_only=False)
|
||||
return self._to_flux_record_stream(response, query_options=self._get_query_options())
|
||||
|
||||
def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None,
|
||||
use_extension_dtypes: bool = False):
|
||||
"""
|
||||
Execute synchronous Flux query and return Pandas DataFrame.
|
||||
|
||||
.. note:: If the ``query`` returns tables with differing schemas, then the client generates a :class:`~DataFrame` for each of them.
|
||||
|
||||
:param query: the Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClient.org`` is used.
|
||||
:param data_frame_index: the list of columns that are used as DataFrame index
|
||||
:param params: bind parameters
|
||||
:param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
|
||||
Useful for queries with ``pivot`` function.
|
||||
When data has missing values, column data type may change (to ``object`` or ``float64``).
|
||||
Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
|
||||
For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
|
||||
:return: :class:`~DataFrame` or :class:`~List[DataFrame]`
|
||||
|
||||
.. warning:: For optimal processing of the query results, use the ``pivot()`` function, which aligns results as a table.
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
from(bucket:"my-bucket")
|
||||
|> range(start: -5m, stop: now())
|
||||
|> filter(fn: (r) => r._measurement == "mem")
|
||||
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
|
||||
|
||||
For more info see:
|
||||
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
|
||||
""" # noqa: E501
|
||||
_generator = self.query_data_frame_stream(query, org=org, data_frame_index=data_frame_index, params=params,
|
||||
use_extension_dtypes=use_extension_dtypes)
|
||||
return self._to_data_frames(_generator)
|
||||
|
||||
def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None,
|
||||
use_extension_dtypes: bool = False):
|
||||
"""
|
||||
Execute synchronous Flux query and return stream of Pandas DataFrame as a :class:`~Generator[DataFrame]`.
|
||||
|
||||
.. note:: If the ``query`` returns tables with differing schemas, then the client generates a :class:`~DataFrame` for each of them.
|
||||
|
||||
:param query: the Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClient.org`` is used.
|
||||
:param data_frame_index: the list of columns that are used as DataFrame index
|
||||
:param params: bind parameters
|
||||
:param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
|
||||
Useful for queries with ``pivot`` function.
|
||||
When data has missing values, column data type may change (to ``object`` or ``float64``).
|
||||
Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
|
||||
For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
|
||||
:return: :class:`~Generator[DataFrame]`
|
||||
|
||||
.. warning:: For optimal processing of the query results, use the ``pivot()`` function, which aligns results as a table.
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
from(bucket:"my-bucket")
|
||||
|> range(start: -5m, stop: now())
|
||||
|> filter(fn: (r) => r._measurement == "mem")
|
||||
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
|
||||
|
||||
For more info see:
|
||||
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
|
||||
""" # noqa: E501
|
||||
org = self._org_param(org)
|
||||
|
||||
response = self._query_api.post_query(org=org, query=self._create_query(query, self.default_dialect, params,
|
||||
dataframe_query=True),
|
||||
async_req=False, _preload_content=False, _return_http_data_only=False)
|
||||
|
||||
return self._to_data_frame_stream(data_frame_index=data_frame_index,
|
||||
response=response,
|
||||
query_options=self._get_query_options(),
|
||||
use_extension_dtypes=use_extension_dtypes)
|
||||
|
||||
def __del__(self):
|
||||
"""Close QueryAPI."""
|
||||
pass
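# --- Usage sketch (illustrative only, not part of the QueryApi module above) ---
# Shows query_stream(), which the docstring above describes but does not illustrate.
# FluxRecord.get_time()/get_value() are assumed accessors from
# influxdb_client.client.flux_table; connection values are placeholders.
#
# from influxdb_client import InfluxDBClient
#
# with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
#     records = client.query_api().query_stream(
#         'from(bucket:"my-bucket") |> range(start: -10m)')
#
#     # records are produced lazily, one FluxRecord at a time
#     for record in records:
#         print(record.get_time(), record.get_value())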
|
||||
@@ -0,0 +1,236 @@
|
||||
"""
|
||||
Querying InfluxDB by FluxLang.
|
||||
|
||||
Flux is InfluxData’s functional data scripting language designed for querying, analyzing, and acting on data.
|
||||
"""
|
||||
from typing import List, AsyncGenerator
|
||||
|
||||
from influxdb_client.client._base import _BaseQueryApi
|
||||
from influxdb_client.client.flux_table import FluxRecord, TableList
|
||||
from influxdb_client.client.query_api import QueryOptions
|
||||
from influxdb_client.rest import _UTF_8_encoding, ApiException
|
||||
from .._async.rest import RESTResponseAsync
|
||||
|
||||
|
||||
class QueryApiAsync(_BaseQueryApi):
|
||||
"""Asynchronous implementation for '/api/v2/query' endpoint."""
|
||||
|
||||
def __init__(self, influxdb_client, query_options=QueryOptions()):
|
||||
"""
|
||||
Initialize query client.
|
||||
|
||||
:param influxdb_client: influxdb client
|
||||
"""
|
||||
super().__init__(influxdb_client=influxdb_client, query_options=query_options)
|
||||
|
||||
async def query(self, query: str, org=None, params: dict = None) -> TableList:
|
||||
"""
|
||||
Execute asynchronous Flux query and return result as a :class:`~influxdb_client.client.flux_table.FluxTable` list.
|
||||
|
||||
:param query: the Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClientAsync.org`` is used.
|
||||
:param params: bind parameters
|
||||
:return: :class:`~influxdb_client.client.flux_table.FluxTable` list wrapped into
|
||||
:class:`~influxdb_client.client.flux_table.TableList`
|
||||
:rtype: TableList
|
||||
|
||||
Serialize the query results to a flattened list of values via :func:`~influxdb_client.client.flux_table.TableList.to_values`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
|
||||
|
||||
async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
|
||||
# Query: using Table structure
|
||||
tables = await client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
|
||||
|
||||
# Serialize to values
|
||||
output = tables.to_values(columns=['location', '_time', '_value'])
|
||||
print(output)
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
[
|
||||
['New York', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 24.3],
|
||||
['Prague', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 25.3],
|
||||
...
|
||||
]
|
||||
|
||||
Serialize the query results to JSON via :func:`~influxdb_client.client.flux_table.TableList.to_json`:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
|
||||
|
||||
async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
# Query: using Table structure
|
||||
tables = await client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
|
||||
|
||||
# Serialize to JSON
|
||||
output = tables.to_json(indent=5)
|
||||
print(output)
|
||||
|
||||
.. code-block:: javascript
|
||||
|
||||
[
|
||||
{
|
||||
"_measurement": "mem",
|
||||
"_start": "2021-06-23T06:50:11.897825+00:00",
|
||||
"_stop": "2021-06-25T06:50:11.897825+00:00",
|
||||
"_time": "2020-02-27T16:20:00.897825+00:00",
|
||||
"region": "north",
|
||||
"_field": "usage",
|
||||
"_value": 15
|
||||
},
|
||||
{
|
||||
"_measurement": "mem",
|
||||
"_start": "2021-06-23T06:50:11.897825+00:00",
|
||||
"_stop": "2021-06-25T06:50:11.897825+00:00",
|
||||
"_time": "2020-02-27T16:20:01.897825+00:00",
|
||||
"region": "west",
|
||||
"_field": "usage",
|
||||
"_value": 10
|
||||
},
|
||||
...
|
||||
]
|
||||
""" # noqa: E501
|
||||
org = self._org_param(org)
|
||||
|
||||
response = await self._post_query(org=org, query=self._create_query(query, self.default_dialect, params))
|
||||
|
||||
return await self._to_tables_async(response, query_options=self._get_query_options())
|
||||
|
||||
async def query_stream(self, query: str, org=None, params: dict = None) -> AsyncGenerator['FluxRecord', None]:
|
||||
"""
|
||||
Execute asynchronous Flux query and return stream of :class:`~influxdb_client.client.flux_table.FluxRecord` as an AsyncGenerator[:class:`~influxdb_client.client.flux_table.FluxRecord`].
|
||||
|
||||
:param query: the Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClientAsync.org`` is used.
|
||||
:param params: bind parameters
|
||||
:return: AsyncGenerator[:class:`~influxdb_client.client.flux_table.FluxRecord`]
|
||||
""" # noqa: E501
|
||||
org = self._org_param(org)
|
||||
|
||||
response = await self._post_query(org=org, query=self._create_query(query, self.default_dialect, params))
|
||||
|
||||
return await self._to_flux_record_stream_async(response, query_options=self._get_query_options())
|
||||
|
||||
async def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None,
|
||||
use_extension_dtypes: bool = False):
|
||||
"""
|
||||
Execute asynchronous Flux query and return :class:`~pandas.core.frame.DataFrame`.
|
||||
|
||||
.. note:: If the ``query`` returns tables with differing schemas, then the client generates a :class:`~DataFrame` for each of them.
|
||||
|
||||
:param query: the Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClientAsync.org`` is used.
|
||||
:param data_frame_index: the list of columns that are used as DataFrame index
|
||||
:param params: bind parameters
|
||||
:param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
|
||||
Useful for queries with ``pivot`` function.
|
||||
When data has missing values, column data type may change (to ``object`` or ``float64``).
|
||||
Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
|
||||
For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
|
||||
:return: :class:`~DataFrame` or :class:`~List[DataFrame]`
|
||||
|
||||
.. warning:: For optimal processing of the query results, use the ``pivot()`` function, which aligns results as a table.
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
from(bucket:"my-bucket")
|
||||
|> range(start: -5m, stop: now())
|
||||
|> filter(fn: (r) => r._measurement == "mem")
|
||||
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
|
||||
|
||||
For more info see:
|
||||
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
|
||||
""" # noqa: E501
|
||||
_generator = await self.query_data_frame_stream(query, org=org, data_frame_index=data_frame_index,
|
||||
params=params, use_extension_dtypes=use_extension_dtypes)
|
||||
|
||||
dataframes = []
|
||||
async for dataframe in _generator:
|
||||
dataframes.append(dataframe)
|
||||
|
||||
return self._to_data_frames(dataframes)
|
||||
|
||||
async def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[str] = None,
|
||||
params: dict = None, use_extension_dtypes: bool = False):
|
||||
"""
|
||||
Execute asynchronous Flux query and return stream of :class:`~pandas.core.frame.DataFrame` as an AsyncGenerator[:class:`~pandas.core.frame.DataFrame`].
|
||||
|
||||
.. note:: If the ``query`` returns tables with differing schemas, then the client generates a :class:`~DataFrame` for each of them.
|
||||
|
||||
:param query: the Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClientAsync.org`` is used.
|
||||
:param data_frame_index: the list of columns that are used as DataFrame index
|
||||
:param params: bind parameters
|
||||
:param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
|
||||
Useful for queries with ``pivot`` function.
|
||||
When data has missing values, column data type may change (to ``object`` or ``float64``).
|
||||
Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
|
||||
For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
|
||||
:return: AsyncGenerator[:class:`~DataFrame`]
|
||||
|
||||
.. warning:: For optimal processing of the query results, use the ``pivot()`` function, which aligns results as a table.
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
from(bucket:"my-bucket")
|
||||
|> range(start: -5m, stop: now())
|
||||
|> filter(fn: (r) => r._measurement == "mem")
|
||||
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
|
||||
|
||||
For more info see:
|
||||
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
|
||||
""" # noqa: E501
|
||||
org = self._org_param(org)
|
||||
|
||||
response = await self._post_query(org=org, query=self._create_query(query, self.default_dialect, params,
|
||||
dataframe_query=True))
|
||||
|
||||
return await self._to_data_frame_stream_async(data_frame_index=data_frame_index, response=response,
|
||||
query_options=self._get_query_options(),
|
||||
use_extension_dtypes=use_extension_dtypes)
|
||||
|
||||
async def query_raw(self, query: str, org=None, dialect=_BaseQueryApi.default_dialect, params: dict = None):
|
||||
"""
|
||||
Execute an asynchronous Flux query and return the raw unprocessed result as a str.
|
||||
|
||||
:param query: a Flux query
|
||||
:param str, Organization org: specifies the organization for executing the query;
|
||||
Take the ``ID``, ``Name`` or ``Organization``.
|
||||
If not specified the default value from ``InfluxDBClientAsync.org`` is used.
|
||||
:param dialect: csv dialect format
|
||||
:param params: bind parameters
|
||||
:return: :class:`~str`
|
||||
"""
|
||||
org = self._org_param(org)
|
||||
result = await self._post_query(org=org, query=self._create_query(query, dialect, params))
|
||||
raw_bytes = await result.read()
|
||||
return raw_bytes.decode(_UTF_8_encoding)
|
||||
|
||||
async def _post_query(self, org, query):
|
||||
response = await self._query_api.post_query_async(org=org,
|
||||
query=query,
|
||||
async_req=False,
|
||||
_preload_content=False,
|
||||
_return_http_data_only=True)
|
||||
if not 200 <= response.status <= 299:
|
||||
data = await response.read()
|
||||
raise ApiException(http_resp=RESTResponseAsync(response, data))
|
||||
|
||||
return response
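# --- Usage sketch (illustrative only, not part of the QueryApiAsync module above) ---
# Minimal asyncio driver for the async query API above; `InfluxDBClientAsync` comes from
# influxdb_client.client.influxdb_client_async (as imported in the docstring examples
# above), and the connection values are placeholders.
#
# import asyncio
# from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
#
# async def main():
#     async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token",
#                                    org="my-org") as client:
#         # stream of FluxRecord - consumed with `async for`
#         records = await client.query_api().query_stream(
#             'from(bucket:"my-bucket") |> range(start: -10m)')
#         async for record in records:
#             print(record)
#
# if __name__ == "__main__":
#     asyncio.run(main())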
|
||||
@@ -0,0 +1,226 @@
|
||||
"""
|
||||
Process and analyze your data with tasks in the InfluxDB task engine.
|
||||
|
||||
Use tasks (scheduled Flux queries) to input a data stream and then analyze, modify, and act on the data accordingly.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
from typing import List
|
||||
|
||||
from influxdb_client import TasksService, Task, TaskCreateRequest, TaskUpdateRequest, LabelResponse, LabelMapping, \
|
||||
AddResourceMemberRequestBody, RunManually, Run, LogEvent
|
||||
from influxdb_client.client._pages import _Paginated
|
||||
|
||||
|
||||
class TasksApi(object):
|
||||
"""Implementation for '/api/v2/tasks' endpoint."""
|
||||
|
||||
def __init__(self, influxdb_client):
|
||||
"""Initialize defaults."""
|
||||
self._influxdb_client = influxdb_client
|
||||
self._service = TasksService(influxdb_client.api_client)
|
||||
|
||||
def find_task_by_id(self, task_id) -> Task:
|
||||
"""Retrieve a task."""
|
||||
task = self._service.get_tasks_id(task_id)
|
||||
return task
|
||||
|
||||
def find_tasks(self, **kwargs):
|
||||
"""List all tasks up to set limit (max 500).
|
||||
|
||||
:key str name: only returns tasks with the specified name
|
||||
:key str after: returns tasks after specified ID
|
||||
:key str user: filter tasks to a specific user ID
|
||||
:key str org: filter tasks to a specific organization name
|
||||
:key str org_id: filter tasks to a specific organization ID
|
||||
:key int limit: the number of tasks to return
|
||||
:return: Tasks
|
||||
"""
|
||||
return self._service.get_tasks(**kwargs).tasks
|
||||
|
||||
def find_tasks_iter(self, **kwargs):
|
||||
"""Iterate over all tasks with pagination.
|
||||
|
||||
:key str name: only returns tasks with the specified name
|
||||
:key str after: returns tasks after specified ID
|
||||
:key str user: filter tasks to a specific user ID
|
||||
:key str org: filter tasks to a specific organization name
|
||||
:key str org_id: filter tasks to a specific organization ID
|
||||
:key int limit: the number of tasks in one page
|
||||
:return: Tasks iterator
|
||||
"""
|
||||
return _Paginated(self._service.get_tasks, lambda response: response.tasks).find_iter(**kwargs)
|
||||
|
||||
def create_task(self, task: Task = None, task_create_request: TaskCreateRequest = None) -> Task:
|
||||
"""Create a new task."""
|
||||
if task_create_request is not None:
|
||||
return self._service.post_tasks(task_create_request)
|
||||
|
||||
if task is not None:
|
||||
request = TaskCreateRequest(flux=task.flux, org_id=task.org_id, org=task.org, description=task.description,
|
||||
status=task.status)
|
||||
|
||||
return self.create_task(task_create_request=request)
|
||||
|
||||
raise ValueError("task or task_create_request must be not None")
|
||||
|
||||
@staticmethod
|
||||
def _create_task(name: str, flux: str, every, cron, org_id: str) -> Task:
|
||||
|
||||
task = Task(id=0, name=name, org_id=org_id, status="active", flux=flux)
|
||||
|
||||
repetition = ""
|
||||
if every is not None:
|
||||
repetition += "every: "
|
||||
repetition += every
|
||||
|
||||
if cron is not None:
|
||||
repetition += "cron: "
|
||||
repetition += '"' + cron + '"'
|
||||
|
||||
flux_with_options = '{} \n\noption task = {{name: "{}", {}}}'.format(flux, name, repetition)
|
||||
task.flux = flux_with_options
|
||||
|
||||
return task
|
||||
|
||||
def create_task_every(self, name, flux, every, organization) -> Task:
|
||||
"""Create a new task with every repetition schedule."""
|
||||
task = self._create_task(name, flux, every, None, organization.id)
|
||||
return self.create_task(task)
|
||||
|
||||
def create_task_cron(self, name: str, flux: str, cron: str, org_id: str) -> Task:
|
||||
"""Create a new task with cron repetition schedule."""
|
||||
task = self._create_task(name=name, flux=flux, cron=cron, org_id=org_id, every=None)
|
||||
return self.create_task(task)
|
||||
|
||||
def delete_task(self, task_id: str):
|
||||
"""Delete a task."""
|
||||
if task_id is not None:
|
||||
return self._service.delete_tasks_id(task_id=task_id)
|
||||
|
||||
def update_task(self, task: Task) -> Task:
|
||||
"""Update a task."""
|
||||
req = TaskUpdateRequest(flux=task.flux, description=task.description, every=task.every, cron=task.cron,
|
||||
status=task.status, offset=task.offset)
|
||||
|
||||
return self.update_task_request(task_id=task.id, task_update_request=req)
|
||||
|
||||
def update_task_request(self, task_id, task_update_request: TaskUpdateRequest) -> Task:
|
||||
"""Update a task."""
|
||||
return self._service.patch_tasks_id(task_id=task_id, task_update_request=task_update_request)
|
||||
|
||||
def clone_task(self, task: Task) -> Task:
|
||||
"""Clone a task."""
|
||||
cloned = Task(id=0, name=task.name, org_id=task.org_id, org=task.org, flux=task.flux, status="active")
|
||||
|
||||
created = self.create_task(cloned)
|
||||
if task.id:
|
||||
labels = self.get_labels(task.id)
|
||||
for label in labels.labels:
|
||||
self.add_label(label.id, created.id)
|
||||
return created
|
||||
|
||||
def get_labels(self, task_id):
|
||||
"""List all labels for a task."""
|
||||
return self._service.get_tasks_id_labels(task_id=task_id)
|
||||
|
||||
def add_label(self, label_id: str, task_id: str) -> LabelResponse:
|
||||
"""Add a label to a task."""
|
||||
label_mapping = LabelMapping(label_id=label_id)
|
||||
return self._service.post_tasks_id_labels(task_id=task_id, label_mapping=label_mapping)
|
||||
|
||||
def delete_label(self, label_id: str, task_id: str):
|
||||
"""Delete a label from a task."""
|
||||
return self._service.delete_tasks_id_labels_id(task_id=task_id, label_id=label_id)
|
||||
|
||||
def get_members(self, task_id: str):
|
||||
"""List all task members."""
|
||||
return self._service.get_tasks_id_members(task_id=task_id).users
|
||||
|
||||
def add_member(self, member_id, task_id):
|
||||
"""Add a member to a task."""
|
||||
user = AddResourceMemberRequestBody(id=member_id)
|
||||
return self._service.post_tasks_id_members(task_id=task_id, add_resource_member_request_body=user)
|
||||
|
||||
def delete_member(self, member_id, task_id):
|
||||
"""Remove a member from a task."""
|
||||
return self._service.delete_tasks_id_members_id(user_id=member_id, task_id=task_id)
|
||||
|
||||
def get_owners(self, task_id):
|
||||
"""List all owners of a task."""
|
||||
return self._service.get_tasks_id_owners(task_id=task_id).users
|
||||
|
||||
def add_owner(self, owner_id, task_id):
|
||||
"""Add an owner to a task."""
|
||||
user = AddResourceMemberRequestBody(id=owner_id)
|
||||
return self._service.post_tasks_id_owners(task_id=task_id, add_resource_member_request_body=user)
|
||||
|
||||
def delete_owner(self, owner_id, task_id):
|
||||
"""Remove an owner from a task."""
|
||||
return self._service.delete_tasks_id_owners_id(user_id=owner_id, task_id=task_id)
|
||||
|
||||
def get_runs(self, task_id, **kwargs) -> List['Run']:
|
||||
"""
|
||||
Retrieve list of run records for a task.
|
||||
|
||||
:param task_id: task id
|
||||
:key str after: returns runs after specified ID
|
||||
:key int limit: the number of runs to return
|
||||
:key datetime after_time: filter runs to those scheduled after this time, RFC3339
|
||||
:key datetime before_time: filter runs to those scheduled before this time, RFC3339
|
||||
"""
|
||||
return self._service.get_tasks_id_runs(task_id=task_id, **kwargs).runs
|
||||
|
||||
def get_run(self, task_id: str, run_id: str) -> Run:
|
||||
"""
|
||||
Get run record for specific task and run id.
|
||||
|
||||
:param task_id: task id
|
||||
:param run_id: run id
|
||||
:return: Run for specified task and run id
|
||||
"""
|
||||
return self._service.get_tasks_id_runs_id(task_id=task_id, run_id=run_id)
|
||||
|
||||
def get_run_logs(self, task_id: str, run_id: str) -> List['LogEvent']:
|
||||
"""Retrieve all logs for a run."""
|
||||
return self._service.get_tasks_id_runs_id_logs(task_id=task_id, run_id=run_id).events
|
||||
|
||||
def run_manually(self, task_id: str, scheduled_for: datetime = None):
|
||||
"""
|
||||
Manually start a run of the task now, overriding the current schedule.
|
||||
|
||||
:param task_id:
|
||||
:param scheduled_for: planned execution
|
||||
"""
|
||||
r = RunManually(scheduled_for=scheduled_for)
|
||||
return self._service.post_tasks_id_runs(task_id=task_id, run_manually=r)
|
||||
|
||||
def retry_run(self, task_id: str, run_id: str):
|
||||
"""
|
||||
Retry a task run.
|
||||
|
||||
:param task_id: task id
|
||||
:param run_id: run id
|
||||
"""
|
||||
return self._service.post_tasks_id_runs_id_retry(task_id=task_id, run_id=run_id)
|
||||
|
||||
def cancel_run(self, task_id: str, run_id: str):
|
||||
"""
|
||||
Cancel a currently running run.
|
||||
|
||||
:param task_id:
|
||||
:param run_id:
|
||||
"""
|
||||
return self._service.delete_tasks_id_runs_id(task_id=task_id, run_id=run_id)
|
||||
|
||||
def get_logs(self, task_id: str) -> List['LogEvent']:
|
||||
"""
|
||||
Retrieve all logs for a task.
|
||||
|
||||
:param task_id: task id
|
||||
"""
|
||||
return self._service.get_tasks_id_logs(task_id=task_id).events
|
||||
|
||||
def find_tasks_by_user(self, task_user_id):
|
||||
"""List all tasks by user."""
|
||||
return self.find_tasks(user=task_user_id)
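# --- Usage sketch (illustrative only, not part of the TasksApi module above) ---
# `InfluxDBClient.tasks_api()` is assumed to return the TasksApi shown above, and the
# returned Run is assumed to expose an `id`; connection values, bucket names and the
# organization lookup are placeholders.
#
# from influxdb_client import InfluxDBClient
#
# with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
#     tasks_api = client.tasks_api()
#     org = client.organizations_api().find_organizations(org="my-org")[0]
#
#     flux = 'from(bucket: "my-bucket") |> range(start: -1h) ' \
#            '|> to(bucket: "downsampled", org: "my-org")'
#
#     # create_task_every() wraps the Flux with `option task = {...}` (see _create_task)
#     task = tasks_api.create_task_every(name="downsample", flux=flux,
#                                        every="1h", organization=org)
#
#     run = tasks_api.run_manually(task_id=task.id)
#     print(tasks_api.get_run_logs(task_id=task.id, run_id=run.id))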
|
||||
@@ -0,0 +1,80 @@
|
||||
"""
|
||||
Users are those with access to InfluxDB.
|
||||
|
||||
To grant a user permission to access data, add them as a member of an organization
|
||||
and provide them with an authentication token.
|
||||
"""
|
||||
|
||||
from typing import Union
|
||||
from influxdb_client import UsersService, User, Users, UserResponse, PasswordResetBody
|
||||
|
||||
|
||||
class UsersApi(object):
|
||||
"""Implementation for '/api/v2/users' endpoint."""
|
||||
|
||||
def __init__(self, influxdb_client):
|
||||
"""Initialize defaults."""
|
||||
self._influxdb_client = influxdb_client
|
||||
self._service = UsersService(influxdb_client.api_client)
|
||||
|
||||
def me(self) -> User:
|
||||
"""Return the current authenticated user."""
|
||||
user = self._service.get_me()
|
||||
return user
|
||||
|
||||
def create_user(self, name: str) -> User:
|
||||
"""Create a user."""
|
||||
user = User(name=name)
|
||||
|
||||
return self._service.post_users(user=user)
|
||||
|
||||
def update_user(self, user: User) -> UserResponse:
|
||||
"""Update a user.
|
||||
|
||||
:param user: User update to apply (required)
|
||||
:return: User
|
||||
"""
|
||||
return self._service.patch_users_id(user_id=user.id, user=user)
|
||||
|
||||
def update_password(self, user: Union[str, User, UserResponse], password: str) -> None:
|
||||
"""Update a password.
|
||||
|
||||
:param user: User to update password (required)
|
||||
:param password: New password (required)
|
||||
:return: None
|
||||
"""
|
||||
user_id = self._user_id(user)
|
||||
|
||||
return self._service.post_users_id_password(user_id=user_id, password_reset_body=PasswordResetBody(password))
|
||||
|
||||
def delete_user(self, user: Union[str, User, UserResponse]) -> None:
|
||||
"""Delete a user.
|
||||
|
||||
:param user: user id or User
|
||||
:return: None
|
||||
"""
|
||||
user_id = self._user_id(user)
|
||||
|
||||
return self._service.delete_users_id(user_id=user_id)
|
||||
|
||||
def find_users(self, **kwargs) -> Users:
|
||||
"""List all users.
|
||||
|
||||
:key int offset: The offset for pagination. The number of records to skip.
|
||||
:key int limit: Limits the number of records returned. Default is `20`.
|
||||
:key str after: The last resource ID from which to seek (but not including).
|
||||
This is to be used instead of `offset`.
|
||||
:key str name: The user name.
|
||||
:key str id: The user ID.
|
||||
:return: Users
|
||||
"""
|
||||
return self._service.get_users(**kwargs)
|
||||
|
||||
def _user_id(self, user: Union[str, User, UserResponse]):
|
||||
if isinstance(user, User):
|
||||
user_id = user.id
|
||||
elif isinstance(user, UserResponse):
|
||||
user_id = user.id
|
||||
else:
|
||||
user_id = user
|
||||
return user_id
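# --- Usage sketch (illustrative only, not part of the UsersApi module above) ---
# `InfluxDBClient.users_api()` is assumed to return the UsersApi shown above; connection
# values, the user name and the password are placeholders.
#
# from influxdb_client import InfluxDBClient
#
# with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
#     users_api = client.users_api()
#
#     user = users_api.create_user(name="alice")
#     users_api.update_password(user=user, password="s3cr3t-p4ssw0rd")
#
#     # find_users() forwards keyword arguments such as `name` or `limit`
#     print(users_api.find_users(name="alice"))
#
#     # delete_user accepts an id, a User or a UserResponse (see _user_id above)
#     users_api.delete_user(user)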
|
||||
@@ -0,0 +1 @@
|
||||
"""Utils package."""
|
||||
@@ -0,0 +1,101 @@
|
||||
"""Utils to get right Date parsing function."""
|
||||
import datetime
|
||||
from sys import version_info
|
||||
import threading
|
||||
from datetime import timezone as tz
|
||||
|
||||
from dateutil import parser
|
||||
|
||||
date_helper = None
|
||||
|
||||
lock_ = threading.Lock()
|
||||
|
||||
|
||||
class DateHelper:
|
||||
"""
|
||||
DateHelper groups different implementations of date operations.
|
||||
|
||||
If you would like to serialize the query results to a custom timezone, you can use the following code:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client.client.util import date_utils
|
||||
from influxdb_client.client.util.date_utils import DateHelper
|
||||
import dateutil.parser
|
||||
from dateutil import tz
|
||||
|
||||
def parse_date(date_string: str):
|
||||
return dateutil.parser.parse(date_string).astimezone(tz.gettz('ETC/GMT+2'))
|
||||
|
||||
date_utils.date_helper = DateHelper()
|
||||
date_utils.date_helper.parse_date = parse_date
|
||||
"""
|
||||
|
||||
def __init__(self, timezone: datetime.tzinfo = tz.utc) -> None:
|
||||
"""
|
||||
Initialize defaults.
|
||||
|
||||
:param timezone: Default timezone used for serializing "datetime" values without "tzinfo".
|
||||
Default value is "UTC".
|
||||
"""
|
||||
self.timezone = timezone
|
||||
|
||||
def parse_date(self, date_string: str):
|
||||
"""
|
||||
Parse string into Date or Timestamp.
|
||||
|
||||
:return: Returns a :class:`datetime.datetime` object or compliant implementation
|
||||
like :class:`pandas.Timestamp`
|
||||
"""
|
||||
pass
|
||||
|
||||
def to_nanoseconds(self, delta):
|
||||
"""
|
||||
Get number of nanoseconds in timedelta.
|
||||
|
||||
Solution comes from v1 client. Thx.
|
||||
https://github.com/influxdata/influxdb-python/pull/811
|
||||
"""
|
||||
nanoseconds_in_days = delta.days * 86400 * 10 ** 9
|
||||
nanoseconds_in_seconds = delta.seconds * 10 ** 9
|
||||
nanoseconds_in_micros = delta.microseconds * 10 ** 3
|
||||
|
||||
return nanoseconds_in_days + nanoseconds_in_seconds + nanoseconds_in_micros
|
||||
|
||||
def to_utc(self, value: datetime):
|
||||
"""
|
||||
Convert datetime to UTC timezone.
|
||||
|
||||
:param value: datetime
|
||||
:return: datetime in UTC
|
||||
"""
|
||||
if not value.tzinfo:
|
||||
return self.to_utc(value.replace(tzinfo=self.timezone))
|
||||
else:
|
||||
return value.astimezone(tz.utc)
|
||||
|
||||
|
||||
def get_date_helper() -> DateHelper:
|
||||
"""
|
||||
Return DateHelper with proper implementation.
|
||||
|
||||
If 'ciso8601' is available, use 'ciso8601.parse_datetime'; otherwise
use 'datetime.fromisoformat' (Python >= 3.11) or 'dateutil.parse' (Python < 3.11).
|
||||
"""
|
||||
global date_helper
|
||||
if date_helper is None:
|
||||
with lock_:
|
||||
# avoid duplicate initialization
|
||||
if date_helper is None:
|
||||
_date_helper = DateHelper()
|
||||
try:
|
||||
import ciso8601
|
||||
_date_helper.parse_date = ciso8601.parse_datetime
|
||||
except ModuleNotFoundError:
|
||||
if (version_info.major, version_info.minor) >= (3, 11):
|
||||
_date_helper.parse_date = datetime.datetime.fromisoformat
|
||||
else:
|
||||
_date_helper.parse_date = parser.parse
|
||||
date_helper = _date_helper
|
||||
|
||||
return date_helper
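# --- Usage sketch (illustrative only, not part of the date_utils module above) ---
# get_date_helper() picks ciso8601, datetime.fromisoformat or dateutil.parser depending
# on the environment (see above). The nanosecond arithmetic follows to_nanoseconds():
# days * 86400 * 10**9 + seconds * 10**9 + microseconds * 10**3.
#
# from datetime import timedelta
#
# helper = get_date_helper()
# parsed = helper.parse_date("2022-06-16T10:00:00Z")
#
# delta = timedelta(days=1, seconds=2, microseconds=3)
# # 86_400_000_000_000 + 2_000_000_000 + 3_000 = 86_402_000_003_000 ns
# print(helper.to_nanoseconds(delta))
# print(helper.to_utc(parsed))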
|
||||
@@ -0,0 +1,15 @@
|
||||
"""Pandas date utils."""
|
||||
from influxdb_client.client.util.date_utils import DateHelper
|
||||
from influxdb_client.extras import pd
|
||||
|
||||
|
||||
class PandasDateTimeHelper(DateHelper):
|
||||
"""DateHelper that use Pandas library with nanosecond precision."""
|
||||
|
||||
def parse_date(self, date_string: str):
|
||||
"""Parse date string into `class 'pandas._libs.tslibs.timestamps.Timestamp`."""
|
||||
return pd.to_datetime(date_string)
|
||||
|
||||
def to_nanoseconds(self, delta):
|
||||
"""Get number of nanoseconds with nanos precision."""
|
||||
return super().to_nanoseconds(delta) + (delta.nanoseconds if hasattr(delta, 'nanoseconds') else 0)
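# --- Usage sketch (illustrative only, not part of the pandas date helper module above) ---
# Installs the pandas-based helper as the module-wide date_helper, mirroring the override
# pattern shown in the DateHelper docstring; the module path for PandasDateTimeHelper is
# assumed, and pandas must be installed (influxdb_client.extras imports it).
#
# from influxdb_client.client.util import date_utils
# from influxdb_client.client.util.date_utils_pandas import PandasDateTimeHelper
#
# date_utils.date_helper = PandasDateTimeHelper()
#
# ts = date_utils.date_helper.parse_date("2022-06-16T10:00:00.123456789Z")
# print(ts)  # pandas.Timestamp with nanosecond precision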
|
||||
@@ -0,0 +1,50 @@
|
||||
"""Functions to share utility across client classes."""
|
||||
from influxdb_client.rest import ApiException
|
||||
|
||||
|
||||
def _is_id(value):
|
||||
"""
|
||||
Check if the value is a valid InfluxDB ID.
|
||||
|
||||
:param value: to check
|
||||
:return: True if the provided parameter is a valid InfluxDB ID.
|
||||
"""
|
||||
if value and len(value) == 16:
|
||||
try:
|
||||
int(value, 16)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
return False
|
||||
|
||||
|
||||
def get_org_query_param(org, client, required_id=False):
|
||||
"""
|
||||
Get required type of Org query parameter.
|
||||
|
||||
:param str, Organization org: value provided as a parameter into API (optional)
|
||||
:param InfluxDBClient client: with default value for Org parameter
|
||||
:param bool required_id: true if the query param has to be an ID
|
||||
:return: required type of org query parameter or None
|
||||
"""
|
||||
_org = client.org if org is None else org
|
||||
if 'Organization' in type(_org).__name__:
|
||||
_org = _org.id
|
||||
if required_id and _org and not _is_id(_org):
|
||||
try:
|
||||
organizations = client.organizations_api().find_organizations(org=_org)
|
||||
if len(organizations) < 1:
|
||||
from influxdb_client.client.exceptions import InfluxDBError
|
||||
message = f"The client cannot find organization with name: '{_org}' " \
|
||||
"to determine their ID. Are you using token with sufficient permission?"
|
||||
raise InfluxDBError(response=None, message=message)
|
||||
return organizations[0].id
|
||||
except ApiException as e:
|
||||
if e.status == 404:
|
||||
from influxdb_client.client.exceptions import InfluxDBError
|
||||
message = f"The client cannot find organization with name: '{_org}' " \
|
||||
"to determine their ID."
|
||||
raise InfluxDBError(response=None, message=message)
|
||||
raise e
|
||||
|
||||
return _org
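# --- Usage sketch (illustrative only, not part of the helpers module above) ---
# Shows how _is_id() and get_org_query_param() behave; the client and org values are
# placeholders.
#
# print(_is_id("0123456789abcdef"))   # True  - 16 hexadecimal characters
# print(_is_id("my-org"))             # False - resolved by name instead
#
# from influxdb_client import InfluxDBClient
#
# with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
#     # With required_id=True a name is translated to the organization ID
#     # via client.organizations_api().find_organizations(org=...).
#     org_id = get_org_query_param(org="my-org", client=client, required_id=True)
#     print(org_id)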
|
||||
@@ -0,0 +1,205 @@
|
||||
"""
|
||||
Helper classes to make it easier to use the client in a multiprocessing environment.
|
||||
|
||||
For more information how the multiprocessing works see Python's
|
||||
`reference docs <https://docs.python.org/3/library/multiprocessing.html>`_.
|
||||
"""
|
||||
import logging
|
||||
import multiprocessing
|
||||
|
||||
from influxdb_client import InfluxDBClient, WriteOptions
|
||||
from influxdb_client.client.exceptions import InfluxDBError
|
||||
|
||||
logger = logging.getLogger('influxdb_client.client.util.multiprocessing_helper')
|
||||
|
||||
|
||||
def _success_callback(conf: (str, str, str), data: str):
|
||||
"""Successfully writen batch."""
|
||||
logger.debug(f"Written batch: {conf}, data: {data}")
|
||||
|
||||
|
||||
def _error_callback(conf: (str, str, str), data: str, exception: InfluxDBError):
|
||||
"""Unsuccessfully writen batch."""
|
||||
logger.debug(f"Cannot write batch: {conf}, data: {data} due: {exception}")
|
||||
|
||||
|
||||
def _retry_callback(conf: (str, str, str), data: str, exception: InfluxDBError):
|
||||
"""Retryable error."""
|
||||
logger.debug(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
|
||||
|
||||
|
||||
class _PoisonPill:
|
||||
"""To notify process to terminate."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class MultiprocessingWriter(multiprocessing.Process):
|
||||
"""
|
||||
Helper class to write data into InfluxDB in an independent OS process.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import WriteOptions
|
||||
from influxdb_client.client.util.multiprocessing_helper import MultiprocessingWriter
|
||||
|
||||
|
||||
def main():
|
||||
writer = MultiprocessingWriter(url="http://localhost:8086", token="my-token", org="my-org",
|
||||
write_options=WriteOptions(batch_size=100))
|
||||
writer.start()
|
||||
|
||||
for x in range(1, 1000):
|
||||
writer.write(bucket="my-bucket", record=f"mem,tag=a value={x}i {x}")
|
||||
|
||||
writer.__del__()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
|
||||
How to use with context_manager:
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import WriteOptions
|
||||
from influxdb_client.client.util.multiprocessing_helper import MultiprocessingWriter
|
||||
|
||||
|
||||
def main():
|
||||
with MultiprocessingWriter(url="http://localhost:8086", token="my-token", org="my-org",
|
||||
write_options=WriteOptions(batch_size=100)) as writer:
|
||||
for x in range(1, 1000):
|
||||
writer.write(bucket="my-bucket", record=f"mem,tag=a value={x}i {x}")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
|
||||
How to handle batch events:
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import WriteOptions
|
||||
from influxdb_client.client.exceptions import InfluxDBError
|
||||
from influxdb_client.client.util.multiprocessing_helper import MultiprocessingWriter
|
||||
|
||||
|
||||
class BatchingCallback(object):
|
||||
|
||||
def success(self, conf: (str, str, str), data: str):
|
||||
print(f"Written batch: {conf}, data: {data}")
|
||||
|
||||
def error(self, conf: (str, str, str), data: str, exception: InfluxDBError):
|
||||
print(f"Cannot write batch: {conf}, data: {data} due: {exception}")
|
||||
|
||||
def retry(self, conf: (str, str, str), data: str, exception: InfluxDBError):
|
||||
print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
|
||||
|
||||
|
||||
def main():
|
||||
callback = BatchingCallback()
|
||||
with MultiprocessingWriter(url="http://localhost:8086", token="my-token", org="my-org",
|
||||
success_callback=callback.success,
|
||||
error_callback=callback.error,
|
||||
retry_callback=callback.retry) as writer:
|
||||
|
||||
for x in range(1, 1000):
|
||||
writer.write(bucket="my-bucket", record=f"mem,tag=a value={x}i {x}")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
|
||||
"""
|
||||
|
||||
__started__ = False
|
||||
__disposed__ = False
|
||||
|
||||
def __init__(self, **kwargs) -> None:
|
||||
"""
|
||||
Initialize defaults.
|
||||
|
||||
For more information on how to initialize the writer, see the examples above.
|
||||
|
||||
:param kwargs: arguments are passed into ``__init__`` function of ``InfluxDBClient`` and ``write_api``.
|
||||
"""
|
||||
multiprocessing.Process.__init__(self)
|
||||
self.kwargs = kwargs
|
||||
self.client = None
|
||||
self.write_api = None
|
||||
self.queue_ = multiprocessing.Manager().Queue()
|
||||
|
||||
def write(self, **kwargs) -> None:
|
||||
"""
|
||||
Append time-series data to the underlying queue.
|
||||
|
||||
For more information on how to pass arguments, see the examples above.
|
||||
|
||||
:param kwargs: arguments are passed into ``write`` function of ``WriteApi``
|
||||
:return: None
|
||||
"""
|
||||
assert self.__disposed__ is False, 'Cannot write data: the writer is closed.'
|
||||
assert self.__started__ is True, 'Cannot write data: the writer is not started.'
|
||||
self.queue_.put(kwargs)
|
||||
|
||||
def run(self):
|
||||
"""Initialize ``InfluxDBClient`` and waits for data to writes into InfluxDB."""
|
||||
# Initialize Client and Write API
|
||||
self.client = InfluxDBClient(**self.kwargs)
|
||||
self.write_api = self.client.write_api(write_options=self.kwargs.get('write_options', WriteOptions()),
|
||||
success_callback=self.kwargs.get('success_callback', _success_callback),
|
||||
error_callback=self.kwargs.get('error_callback', _error_callback),
|
||||
retry_callback=self.kwargs.get('retry_callback', _retry_callback))
|
||||
# Infinite loop - until poison pill
|
||||
while True:
|
||||
next_record = self.queue_.get()
|
||||
if type(next_record) is _PoisonPill:
|
||||
# Poison pill means break the loop
|
||||
self.terminate()
|
||||
self.queue_.task_done()
|
||||
break
|
||||
self.write_api.write(**next_record)
|
||||
self.queue_.task_done()
|
||||
|
||||
def start(self) -> None:
|
||||
"""Start independent process for writing data into InfluxDB."""
|
||||
super().start()
|
||||
self.__started__ = True
|
||||
|
||||
def terminate(self) -> None:
|
||||
"""
|
||||
Cleanup resources in independent process.
|
||||
|
||||
This function **cannot be used** to terminate the ``MultiprocessingWriter``.
|
||||
If you want to finish your writes please call: ``__del__``.
|
||||
"""
|
||||
if self.write_api:
|
||||
logger.info("flushing data...")
|
||||
self.write_api.__del__()
|
||||
self.write_api = None
|
||||
if self.client:
|
||||
self.client.__del__()
|
||||
self.client = None
|
||||
logger.info("closed")
|
||||
|
||||
def __enter__(self):
|
||||
"""Enter the runtime context related to this object."""
|
||||
self.start()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
"""Exit the runtime context related to this object."""
|
||||
self.__del__()
|
||||
|
||||
def __del__(self):
|
||||
"""Dispose the client and write_api."""
|
||||
if self.__started__:
|
||||
self.queue_.put(_PoisonPill())
|
||||
self.queue_.join()
|
||||
self.join()
|
||||
self.queue_ = None
|
||||
self.__started__ = False
|
||||
self.__disposed__ = True
|
||||
@@ -0,0 +1,52 @@
|
||||
"""The warnings message definition."""
|
||||
import warnings
|
||||
|
||||
|
||||
class MissingPivotFunction(UserWarning):
|
||||
"""User warning about missing pivot() function."""
|
||||
|
||||
@staticmethod
|
||||
def print_warning(query: str):
|
||||
"""Print warning about missing pivot() function and how to deal with that."""
|
||||
if 'fieldsAsCols' in query or 'pivot' in query:
|
||||
return
|
||||
|
||||
message = f"""The query doesn't contains the pivot() function.
|
||||
|
||||
The result will not be shaped for optimal processing by pandas.DataFrame. Use the pivot() function, for example:
|
||||
|
||||
{query} |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
|
||||
|
||||
You can disable this warning by:
|
||||
import warnings
|
||||
from influxdb_client.client.warnings import MissingPivotFunction
|
||||
|
||||
warnings.simplefilter("ignore", MissingPivotFunction)
|
||||
|
||||
For more info see:
|
||||
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
|
||||
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
|
||||
"""
|
||||
warnings.warn(message, MissingPivotFunction)
|
||||
|
||||
|
||||
class CloudOnlyWarning(UserWarning):
|
||||
"""User warning about availability only on the InfluxDB Cloud."""
|
||||
|
||||
@staticmethod
|
||||
def print_warning(api_name: str, doc_url: str):
|
||||
"""Print warning about availability only on the InfluxDB Cloud."""
|
||||
message = f"""The '{api_name}' is available only on the InfluxDB Cloud.
|
||||
|
||||
For more info see:
|
||||
- {doc_url}
|
||||
- https://docs.influxdata.com/influxdb/cloud/
|
||||
|
||||
You can disable this warning by:
|
||||
import warnings
|
||||
from influxdb_client.client.warnings import CloudOnlyWarning
|
||||
|
||||
warnings.simplefilter("ignore", CloudOnlyWarning)
|
||||
"""
|
||||
warnings.warn(message, CloudOnlyWarning)
|
||||
@@ -0,0 +1,290 @@
"""
Functions to serialize a Pandas DataFrame.

Much of the code here is inspired by that in the aioinflux package found here: https://github.com/gusutabopb/aioinflux
"""

import logging
import math
import re

from influxdb_client import WritePrecision
from influxdb_client.client.write.point import _ESCAPE_KEY, _ESCAPE_STRING, _ESCAPE_MEASUREMENT, DEFAULT_WRITE_PRECISION

logger = logging.getLogger('influxdb_client.client.write.dataframe_serializer')


def _itertuples(data_frame):
    cols = [data_frame.iloc[:, k] for k in range(len(data_frame.columns))]
    return zip(data_frame.index, *cols)


class DataframeSerializer:
    """Serialize DataFrame into LineProtocols."""

    def __init__(self, data_frame, point_settings, precision=DEFAULT_WRITE_PRECISION, chunk_size: int = None,
                 **kwargs) -> None:
        """
        Init serializer.

        :param data_frame: Pandas DataFrame to serialize
        :param point_settings: Default Tags
        :param precision: The precision for the unix timestamps within the body line-protocol.
        :param chunk_size: The size of chunk for serializing into chunks.
        :key data_frame_measurement_name: name of measurement for writing Pandas DataFrame
        :key data_frame_tag_columns: list of DataFrame columns which are tags, the remaining columns will be fields
        :key data_frame_timestamp_column: name of DataFrame column which contains a timestamp. The column can be defined as a :class:`~str` value
                                          formatted as `2018-10-26`, `2018-10-26 12:00`, `2018-10-26 12:00:00-05:00`
                                          or other formats and types supported by `pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_ - ``DataFrame``
        :key data_frame_timestamp_timezone: name of the timezone which is used for timestamp column - ``DataFrame``
        """  # noqa: E501
        # This function is hard to understand but for good reason:
        # the approach used here is considerably more efficient
        # than the alternatives.
        #
        # We build up a Python expression that efficiently converts a data point
        # tuple into a line-protocol entry, and then evaluate the expression
        # as a lambda so that we can call it. This avoids the overhead of
        # invoking a function on every data value - we only have one function
        # call per row instead. The expression consists of exactly
        # one f-string, so we build up the parts of it as segments
        # that are concatenated together to make the full f-string inside
        # the lambda.
        #
        # Things are made a little more complex because fields and tags with NaN
        # values and empty tags are omitted from the generated line-protocol
        # output.
        #
        # As an example, say we have a data frame with two value columns:
        #     a float
        #     b int
        #
        # This will generate a lambda expression to be evaluated that looks like
        # this:
        #
        #     lambda p: f"""{measurement_name} {keys[0]}={p[1]},{keys[1]}={p[2]}i {p[0].value}"""
        #
        # This lambda is then executed for each row p.
        #
        # When NaNs are present, the expression looks like this (split
        # across two lines to satisfy the code-style checker)
        #
        #     lambda p: f"""{measurement_name} {"" if pd.isna(p[1])
        #     else f"{keys[0]}={p[1]}"},{keys[1]}={p[2]}i {p[0].value}"""
        #
        # When there's a NaN value in column a, we'll end up with a comma at the start of the
        # fields, so we run a regexp substitution after generating the line-protocol entries
        # to remove this.
        #
        # We're careful to run these potentially costly extra steps only when NaN values actually
        # exist in the data.

        from ...extras import pd, np
        if not isinstance(data_frame, pd.DataFrame):
            raise TypeError('Must be DataFrame, but type was: {0}.'
                            .format(type(data_frame)))

        data_frame_measurement_name = kwargs.get('data_frame_measurement_name')
        if data_frame_measurement_name is None:
            raise TypeError('"data_frame_measurement_name" is a Required Argument')

        timestamp_column = kwargs.get('data_frame_timestamp_column', None)
        timestamp_timezone = kwargs.get('data_frame_timestamp_timezone', None)
        data_frame = data_frame.copy(deep=False)
        data_frame_timestamp = data_frame.index if timestamp_column is None else data_frame[timestamp_column]
        if isinstance(data_frame_timestamp, pd.PeriodIndex):
            data_frame_timestamp = data_frame_timestamp.to_timestamp()
        else:
            # TODO: this is almost certainly not what you want
            # when the index is the default RangeIndex.
            # Instead, it would probably be better to leave
            # out the timestamp unless a time column is explicitly
            # enabled.
            data_frame_timestamp = pd.to_datetime(data_frame_timestamp, unit=precision)

        if timestamp_timezone:
            if isinstance(data_frame_timestamp, pd.DatetimeIndex):
                data_frame_timestamp = data_frame_timestamp.tz_localize(timestamp_timezone)
            else:
                data_frame_timestamp = data_frame_timestamp.dt.tz_localize(timestamp_timezone)

        if hasattr(data_frame_timestamp, 'tzinfo') and data_frame_timestamp.tzinfo is None:
            data_frame_timestamp = data_frame_timestamp.tz_localize('UTC')
        if timestamp_column is None:
            data_frame.index = data_frame_timestamp
        else:
            data_frame[timestamp_column] = data_frame_timestamp

        data_frame_tag_columns = kwargs.get('data_frame_tag_columns')
        data_frame_tag_columns = set(data_frame_tag_columns or [])

        # keys holds a list of string keys.
        keys = []
        # tags holds a list of tag f-string segments ordered alphabetically by tag key.
        tags = []
        # fields holds a list of field f-string segments ordered alphabetically by field key.
        fields = []
        # field_indexes holds the index into each row of all the fields.
        field_indexes = []

        if point_settings.defaultTags:
            for key, value in point_settings.defaultTags.items():
                # Avoid overwriting existing data if there's a column
                # that already exists with the default tag's name.
                # Note: when a new column is added, the old DataFrame
                # that we've made a shallow copy of is unaffected.
                # TODO: when there are NaN or empty values in
                # the column, we could make a deep copy of the
                # data and fill in those values with the default tag value.
                if key not in data_frame.columns:
                    data_frame[key] = value
                    data_frame_tag_columns.add(key)

        # Get a list of all the columns sorted by field/tag key.
        # We want to iterate through the columns in sorted order
        # so that we know when we're on the first field so we
        # can know whether a comma is needed for that
        # field.
        columns = sorted(enumerate(data_frame.dtypes.items()), key=lambda col: col[1][0])

        # null_columns has a bool value for each column holding
        # whether that column contains any null (NaN or None) values.
        null_columns = data_frame.isnull().any()
        timestamp_index = 0

        # Iterate through the columns building up the expression for each column.
        for index, (key, value) in columns:
            key = str(key)
            key_format = f'{{keys[{len(keys)}]}}'
            keys.append(key.translate(_ESCAPE_KEY))
            # The field index is one more than the column index because the
            # time index is at column zero in the finally zipped-together
            # result columns.
            field_index = index + 1
            val_format = f'p[{field_index}]'

            if key in data_frame_tag_columns:
                # This column is a tag column.
                if null_columns.iloc[index]:
                    key_value = f"""{{
                            '' if {val_format} == '' or pd.isna({val_format}) else
                            f',{key_format}={{str({val_format}).translate(_ESCAPE_STRING)}}'
                        }}"""
                else:
                    key_value = f',{key_format}={{str({val_format}).translate(_ESCAPE_KEY)}}'
                tags.append(key_value)
                continue
            elif timestamp_column is not None and key in timestamp_column:
                timestamp_index = field_index
                continue

            # This column is a field column.
            # Note: no comma separator is needed for the first field.
            # It's important to omit it because when the first
            # field column has no nulls, we don't run the comma-removal
            # regexp substitution step.
            sep = '' if len(field_indexes) == 0 else ','
            if issubclass(value.type, np.integer) or issubclass(value.type, np.floating) or issubclass(value.type, np.bool_):  # noqa: E501
                suffix = 'i' if issubclass(value.type, np.integer) else ''
                if null_columns.iloc[index]:
                    field_value = f"""{{"" if pd.isna({val_format}) else f"{sep}{key_format}={{{val_format}}}{suffix}"}}"""  # noqa: E501
                else:
                    field_value = f"{sep}{key_format}={{{val_format}}}{suffix}"
            else:
                if null_columns.iloc[index]:
                    field_value = f"""{{
                            '' if pd.isna({val_format}) else
                            f'{sep}{key_format}="{{str({val_format}).translate(_ESCAPE_STRING)}}"'
                        }}"""
                else:
                    field_value = f'''{sep}{key_format}="{{str({val_format}).translate(_ESCAPE_STRING)}}"'''
            field_indexes.append(field_index)
            fields.append(field_value)

        measurement_name = str(data_frame_measurement_name).translate(_ESCAPE_MEASUREMENT)

        tags = ''.join(tags)
        fields = ''.join(fields)
        timestamp = '{p[%s].value}' % timestamp_index
        if precision == WritePrecision.US:
            timestamp = '{int(p[%s].value / 1e3)}' % timestamp_index
        elif precision == WritePrecision.MS:
            timestamp = '{int(p[%s].value / 1e6)}' % timestamp_index
        elif precision == WritePrecision.S:
            timestamp = '{int(p[%s].value / 1e9)}' % timestamp_index

        f = eval(f'lambda p: f"""{{measurement_name}}{tags} {fields} {timestamp}"""', {
            'measurement_name': measurement_name,
            '_ESCAPE_KEY': _ESCAPE_KEY,
            '_ESCAPE_STRING': _ESCAPE_STRING,
            'keys': keys,
            'pd': pd,
        })

        for k, v in dict(data_frame.dtypes).items():
            if k in data_frame_tag_columns:
                data_frame = data_frame.replace({k: ''}, np.nan)

        def _any_not_nan(p, indexes):
            return any(map(lambda x: not pd.isna(p[x]), indexes))

        self.data_frame = data_frame
        self.f = f
        self.field_indexes = field_indexes
        self.first_field_maybe_null = null_columns.iloc[field_indexes[0] - 1]
        self._any_not_nan = _any_not_nan

        #
        # prepare chunks
        #
        if chunk_size is not None:
            self.number_of_chunks = int(math.ceil(len(data_frame) / float(chunk_size)))
            self.chunk_size = chunk_size
        else:
            self.number_of_chunks = None

    def serialize(self, chunk_idx: int = None):
        """
        Serialize chunk into LineProtocols.

        :param chunk_idx: The index of chunk to serialize. If `None` then serialize whole dataframe.
        """
        if chunk_idx is None:
            chunk = self.data_frame
        else:
            logger.debug("Serialize chunk %s/%s ...", chunk_idx + 1, self.number_of_chunks)
            chunk = self.data_frame[chunk_idx * self.chunk_size:(chunk_idx + 1) * self.chunk_size]

        if self.first_field_maybe_null:
            # When the first field is null (None/NaN), we'll have
            # a spurious leading comma which needs to be removed.
            lp = (re.sub('^(( |[^ ])* ),([a-zA-Z0-9])(.*)', '\\1\\3\\4', self.f(p))
                  for p in filter(lambda x: self._any_not_nan(x, self.field_indexes), _itertuples(chunk)))
            return list(lp)
        else:
            return list(map(self.f, _itertuples(chunk)))

    def number_of_chunks(self):
        """
        Return the number of chunks.

        :return: number of chunks or None if chunk_size is not specified.
        """
        return self.number_of_chunks


def data_frame_to_list_of_points(data_frame, point_settings, precision=DEFAULT_WRITE_PRECISION, **kwargs):
    """
    Serialize DataFrame into LineProtocols.

    :param data_frame: Pandas DataFrame to serialize
    :param point_settings: Default Tags
    :param precision: The precision for the unix timestamps within the body line-protocol.
    :key data_frame_measurement_name: name of measurement for writing Pandas DataFrame
    :key data_frame_tag_columns: list of DataFrame columns which are tags, the remaining columns will be fields
    :key data_frame_timestamp_column: name of DataFrame column which contains a timestamp. The column can be defined as a :class:`~str` value
                                      formatted as `2018-10-26`, `2018-10-26 12:00`, `2018-10-26 12:00:00-05:00`
                                      or other formats and types supported by `pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_ - ``DataFrame``
    :key data_frame_timestamp_timezone: name of the timezone which is used for timestamp column - ``DataFrame``
    """  # noqa: E501
    return DataframeSerializer(data_frame, point_settings, precision, **kwargs).serialize()

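The long comment in ``DataframeSerializer.__init__`` describes the core trick: instead of calling a formatting function per value, the serializer concatenates f-string segments and ``eval()``s a single ``lambda p: f"""..."""`` that turns one row tuple into one line-protocol entry, one function call per row. A stripped-down, self-contained sketch of that technique (illustrative only, not the library's exact expression):

    # Build one f-string lambda for a fixed row layout (time, temperature, humidity)
    # and reuse it for every row: one function call per row instead of per value.
    keys = ["temperature", "humidity"]
    source = 'lambda p: f"""weather {keys[0]}={p[1]},{keys[1]}={p[2]}i {p[0]}"""'
    to_line = eval(source, {"keys": keys})

    rows = [(1257894000123456000, 25.3, 61), (1257894000123457000, 24.9, 60)]
    print([to_line(p) for p in rows])
    # ['weather temperature=25.3,humidity=61i 1257894000123456000', ...]
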
@@ -0,0 +1,371 @@
"""Point data structure to represent LineProtocol."""

import math
import warnings
from builtins import int
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from numbers import Integral

from influxdb_client.client.util.date_utils import get_date_helper
from influxdb_client.domain.write_precision import WritePrecision

EPOCH = datetime.fromtimestamp(0, tz=timezone.utc)

DEFAULT_WRITE_PRECISION = WritePrecision.NS

_ESCAPE_MEASUREMENT = str.maketrans({
    ',': r'\,',
    ' ': r'\ ',
    '\n': r'\n',
    '\t': r'\t',
    '\r': r'\r',
})

_ESCAPE_KEY = str.maketrans({
    ',': r'\,',
    '=': r'\=',
    ' ': r'\ ',
    '\n': r'\n',
    '\t': r'\t',
    '\r': r'\r',
})

_ESCAPE_STRING = str.maketrans({
    '"': r'\"',
    '\\': r'\\',
})

try:
    import numpy as np

    _HAS_NUMPY = True
except ModuleNotFoundError:
    _HAS_NUMPY = False


class Point(object):
    """
    Point defines the values that will be written to the database.

    Ref: https://docs.influxdata.com/influxdb/latest/reference/key-concepts/data-elements/#point
    """

    @staticmethod
    def measurement(measurement):
        """Create a new Point with specified measurement name."""
        p = Point(measurement)
        return p

    @staticmethod
    def from_dict(dictionary: dict, write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, **kwargs):
        """
        Initialize point from 'dict' structure.

        The expected dict structure is:
            - measurement
            - tags
            - fields
            - time

        Example:
            .. code-block:: python

                # Use default dictionary structure
                dict_structure = {
                    "measurement": "h2o_feet",
                    "tags": {"location": "coyote_creek"},
                    "fields": {"water_level": 1.0},
                    "time": 1
                }
                point = Point.from_dict(dict_structure, WritePrecision.NS)

        Example:
            .. code-block:: python

                # Use custom dictionary structure
                dictionary = {
                    "name": "sensor_pt859",
                    "location": "warehouse_125",
                    "version": "2021.06.05.5874",
                    "pressure": 125,
                    "temperature": 10,
                    "created": 1632208639,
                }
                point = Point.from_dict(dictionary,
                                        write_precision=WritePrecision.S,
                                        record_measurement_key="name",
                                        record_time_key="created",
                                        record_tag_keys=["location", "version"],
                                        record_field_keys=["pressure", "temperature"])

        Int Types:
            The following example shows how to configure the types of integer fields.
            It is useful when you want to serialize integers always as ``float`` to avoid ``field type conflict``
            or use ``unsigned 64-bit integer`` as the type for serialization.

            .. code-block:: python

                # Use custom dictionary structure
                dict_structure = {
                    "measurement": "h2o_feet",
                    "tags": {"location": "coyote_creek"},
                    "fields": {
                        "water_level": 1.0,
                        "some_counter": 108913123234
                    },
                    "time": 1
                }

                point = Point.from_dict(dict_structure, field_types={"some_counter": "uint"})

        :param dictionary: dictionary to serialize into a data Point
        :param write_precision: sets the precision for the supplied time values
        :key record_measurement_key: key of dictionary with specified measurement
        :key record_measurement_name: static measurement name for data Point
        :key record_time_key: key of dictionary with specified timestamp
        :key record_tag_keys: list of dictionary keys to use as a tag
        :key record_field_keys: list of dictionary keys to use as a field
        :key field_types: optional dictionary to specify types of serialized fields. Currently, customization is supported for integer types.
            Possible integer types:
                - ``int`` - serialize integers as "**Signed 64-bit integers**" - ``9223372036854775807i`` (default behaviour)
                - ``uint`` - serialize integers as "**Unsigned 64-bit integers**" - ``9223372036854775807u``
                - ``float`` - serialize integers as "**IEEE-754 64-bit floating-point numbers**". Useful to unify number types in your pipeline to avoid field type conflict - ``9223372036854775807``
            The ``field_types`` can also be specified as part of the incoming dictionary. For more info see the example above.
        :return: new data point
        """  # noqa: E501
        measurement_ = kwargs.get('record_measurement_name', None)
        if measurement_ is None:
            measurement_ = dictionary[kwargs.get('record_measurement_key', 'measurement')]
        point = Point(measurement_)

        record_tag_keys = kwargs.get('record_tag_keys', None)
        if record_tag_keys is not None:
            for tag_key in record_tag_keys:
                if tag_key in dictionary:
                    point.tag(tag_key, dictionary[tag_key])
        elif 'tags' in dictionary:
            for tag_key, tag_value in dictionary['tags'].items():
                point.tag(tag_key, tag_value)

        record_field_keys = kwargs.get('record_field_keys', None)
        if record_field_keys is not None:
            for field_key in record_field_keys:
                if field_key in dictionary:
                    point.field(field_key, dictionary[field_key])
        else:
            for field_key, field_value in dictionary['fields'].items():
                point.field(field_key, field_value)

        record_time_key = kwargs.get('record_time_key', 'time')
        if record_time_key in dictionary:
            point.time(dictionary[record_time_key], write_precision=write_precision)

        _field_types = kwargs.get('field_types', {})
        if 'field_types' in dictionary:
            _field_types = dictionary['field_types']
        # Map API fields types to Line Protocol types postfix:
        # - int: 'i'
        # - uint: 'u'
        # - float: ''
        point._field_types = dict(map(
            lambda item: (item[0], 'i' if item[1] == 'int' else 'u' if item[1] == 'uint' else ''),
            _field_types.items()
        ))

        return point

    def __init__(self, measurement_name):
        """Initialize defaults."""
        self._tags = {}
        self._fields = {}
        self._name = measurement_name
        self._time = None
        self._write_precision = DEFAULT_WRITE_PRECISION
        self._field_types = {}

    def time(self, time, write_precision=DEFAULT_WRITE_PRECISION):
        """
        Specify timestamp for DataPoint with declared precision.

        If time doesn't have a specified timezone we assume that the timezone is UTC.

        Examples::
            Point.measurement("h2o").field("val", 1).time("2009-11-10T23:00:00.123456Z")
            Point.measurement("h2o").field("val", 1).time(1257894000123456000)
            Point.measurement("h2o").field("val", 1).time(datetime(2009, 11, 10, 23, 0, 0, 123456))
            Point.measurement("h2o").field("val", 1).time(1257894000123456000, write_precision=WritePrecision.NS)


        :param time: the timestamp for your data
        :param write_precision: sets the precision for the supplied time values
        :return: this point
        """
        self._write_precision = write_precision
        self._time = time
        return self

    def tag(self, key, value):
        """Add tag with key and value."""
        self._tags[key] = value
        return self

    def field(self, field, value):
        """Add field with key and value."""
        self._fields[field] = value
        return self

    def to_line_protocol(self, precision=None):
        """
        Create LineProtocol.

        :param precision: required precision of LineProtocol. If it's not set then use the precision from ``Point``.
        """
        _measurement = _escape_key(self._name, _ESCAPE_MEASUREMENT)
        if _measurement.startswith("#"):
            message = f"""The measurement name '{_measurement}' starts with '#'.

The output Line protocol will be interpreted as a comment by InfluxDB. For more info see:
    - https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#comments
"""
            warnings.warn(message, SyntaxWarning)
        _tags = _append_tags(self._tags)
        _fields = _append_fields(self._fields, self._field_types)
        if not _fields:
            return ""
        _time = _append_time(self._time, self._write_precision if precision is None else precision)

        return f"{_measurement}{_tags}{_fields}{_time}"

    @property
    def write_precision(self):
        """Get precision."""
        return self._write_precision

    @classmethod
    def set_str_rep(cls, rep_function):
        """Set the string representation for all Points."""
        cls.__str___rep = rep_function

    def __str__(self):
        """Create string representation of this Point."""
        return self.to_line_protocol()

    def __eq__(self, other):
        """Return true iff other is equal to self."""
        if not isinstance(other, Point):
            return False
        # assume points are equal iff their instance fields are equal
        return (self._tags == other._tags and
                self._fields == other._fields and
                self._name == other._name and
                self._time == other._time and
                self._write_precision == other._write_precision and
                self._field_types == other._field_types)


def _append_tags(tags):
    _return = []
    for tag_key, tag_value in sorted(tags.items()):

        if tag_value is None:
            continue

        tag = _escape_key(tag_key)
        value = _escape_tag_value(tag_value)
        if tag != '' and value != '':
            _return.append(f'{tag}={value}')

    return f"{',' if _return else ''}{','.join(_return)} "


def _append_fields(fields, field_types):
    _return = []

    for field, value in sorted(fields.items()):
        if value is None:
            continue

        if isinstance(value, float) or isinstance(value, Decimal) or _np_is_subtype(value, 'float'):
            if not math.isfinite(value):
                continue
            s = str(value)
            # It's common to represent whole numbers as floats
            # and the trailing ".0" that Python produces is unnecessary
            # in line-protocol, inconsistent with other line-protocol encoders,
            # and takes more space than needed, so trim it off.
            if s.endswith('.0'):
                s = s[:-2]
            _return.append(f'{_escape_key(field)}={s}')
        elif (isinstance(value, int) or _np_is_subtype(value, 'int')) and not isinstance(value, bool):
            _type = field_types.get(field, "i")
            _return.append(f'{_escape_key(field)}={str(value)}{_type}')
        elif isinstance(value, bool):
            _return.append(f'{_escape_key(field)}={str(value).lower()}')
        elif isinstance(value, str):
            _return.append(f'{_escape_key(field)}="{_escape_string(value)}"')
        else:
            raise ValueError(f'Type: "{type(value)}" of field: "{field}" is not supported.')

    return f"{','.join(_return)}"


def _append_time(time, write_precision) -> str:
    if time is None:
        return ''
    return f" {int(_convert_timestamp(time, write_precision))}"


def _escape_key(tag, escape_list=None) -> str:
    if escape_list is None:
        escape_list = _ESCAPE_KEY
    return str(tag).translate(escape_list)


def _escape_tag_value(value) -> str:
    ret = _escape_key(value)
    if ret.endswith('\\'):
        ret += ' '
    return ret


def _escape_string(value) -> str:
    return str(value).translate(_ESCAPE_STRING)


def _convert_timestamp(timestamp, precision=DEFAULT_WRITE_PRECISION):
    date_helper = get_date_helper()
    if isinstance(timestamp, Integral):
        return timestamp  # assume precision is correct if timestamp is int

    if isinstance(timestamp, str):
        timestamp = date_helper.parse_date(timestamp)

    if isinstance(timestamp, timedelta) or isinstance(timestamp, datetime):

        if isinstance(timestamp, datetime):
            timestamp = date_helper.to_utc(timestamp) - EPOCH

        ns = date_helper.to_nanoseconds(timestamp)

        if precision is None or precision == WritePrecision.NS:
            return ns
        elif precision == WritePrecision.US:
            return ns / 1e3
        elif precision == WritePrecision.MS:
            return ns / 1e6
        elif precision == WritePrecision.S:
            return ns / 1e9

    raise ValueError(timestamp)


def _np_is_subtype(value, np_type):
    if not _HAS_NUMPY or not hasattr(value, 'dtype'):
        return False

    if np_type == 'float':
        return np.issubdtype(value, np.floating)
    elif np_type == 'int':
        return np.issubdtype(value, np.integer)
    return False

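Taken together, ``tag()``, ``field()``, ``time()`` and the escaping helpers above determine the exact line-protocol text a point produces: tags are sorted and escaped, integer fields get an ``i`` suffix, floats lose a trailing ``.0``, strings are quoted, and an integer timestamp is passed through unchanged. A small illustration with made-up values, following the code above:

    point = Point("h2o_feet") \
        .tag("location", "coyote creek") \
        .field("water_level", 2.0) \
        .field("notes", 'rising "fast"') \
        .time(1257894000123456000)

    print(point.to_line_protocol())
    # h2o_feet,location=coyote\ creek notes="rising \"fast\"",water_level=2 1257894000123456000
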
@@ -0,0 +1,148 @@
"""Implementation for Retry strategy during HTTP requests."""

import logging
from datetime import datetime, timedelta
from itertools import takewhile
from random import random
from typing import Callable

from urllib3 import Retry
from urllib3.exceptions import MaxRetryError, ResponseError

from influxdb_client.client.exceptions import InfluxDBError

logger = logging.getLogger('influxdb_client.client.write.retry')


class WritesRetry(Retry):
    """
    Writes retry configuration.

    The next delay is computed as a random value in the range between
    `retry_interval * exponential_base^(attempts-1)` and `retry_interval * exponential_base^(attempts)`.

    Example:
        for retry_interval=5, exponential_base=2, max_retry_delay=125, total=5
        the retry delays are randomly distributed values within the ranges
        [5-10, 10-20, 20-40, 40-80, 80-125]
    """

    def __init__(self, jitter_interval=0, max_retry_delay=125, exponential_base=2, max_retry_time=180, total=5,
                 retry_interval=5, retry_callback: Callable[[Exception], int] = None, **kw):
        """
        Initialize defaults.

        :param int jitter_interval: random milliseconds added when retrying writes
        :param num max_retry_delay: maximum delay when retrying a write, in seconds
        :param int max_retry_time: maximum total retry timeout in seconds,
                                   an attempt after this timeout throws MaxRetryError
        :param int total: maximum number of retries
        :param num retry_interval: initial first retry delay range in seconds
        :param int exponential_base: base for the exponential retry delay
        :param Callable[[Exception], int] retry_callback: the callable ``callback`` to run after a retryable
                                                          error occurred.
                                                          The callable must accept one argument:
                                                                - `Exception`: a retryable error
        """
        super().__init__(**kw)
        self.jitter_interval = jitter_interval
        self.total = total
        self.retry_interval = retry_interval
        self.max_retry_delay = max_retry_delay
        self.max_retry_time = max_retry_time
        self.exponential_base = exponential_base
        self.retry_timeout = datetime.now() + timedelta(seconds=max_retry_time)
        self.retry_callback = retry_callback

    def new(self, **kw):
        """Initialize defaults."""
        if 'jitter_interval' not in kw:
            kw['jitter_interval'] = self.jitter_interval
        if 'retry_interval' not in kw:
            kw['retry_interval'] = self.retry_interval
        if 'max_retry_delay' not in kw:
            kw['max_retry_delay'] = self.max_retry_delay
        if 'max_retry_time' not in kw:
            kw['max_retry_time'] = self.max_retry_time
        if 'exponential_base' not in kw:
            kw['exponential_base'] = self.exponential_base
        if 'retry_callback' not in kw:
            kw['retry_callback'] = self.retry_callback

        new = super().new(**kw)
        new.retry_timeout = self.retry_timeout
        return new

    def is_retry(self, method, status_code, has_retry_after=False):
        """is_retry doesn't require the Retry-After header. If there is no Retry-After we will use backoff."""
        if not self._is_method_retryable(method):
            return False

        return self.total and (status_code >= 429)

    def get_backoff_time(self):
        """Variant of exponential backoff with initial and max delay and a random jitter delay."""
        # We want to consider only the last consecutive errors sequence (Ignore redirects).
        consecutive_errors_len = len(
            list(
                takewhile(lambda x: x.redirect_location is None, reversed(self.history))
            )
        )
        # First fail doesn't increase backoff
        consecutive_errors_len -= 1
        if consecutive_errors_len < 0:
            return 0

        range_start = self.retry_interval
        range_stop = self.retry_interval * self.exponential_base

        i = 1
        while i <= consecutive_errors_len:
            i += 1
            range_start = range_stop
            range_stop = range_stop * self.exponential_base
            if range_stop > self.max_retry_delay:
                break

        if range_stop > self.max_retry_delay:
            range_stop = self.max_retry_delay

        return range_start + (range_stop - range_start) * self._random()

    def get_retry_after(self, response):
        """Get the value of the Retry-After header and append a random jitter delay."""
        retry_after = super().get_retry_after(response)
        if retry_after:
            retry_after += self._jitter_delay()
        return retry_after

    def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
        """Return a new Retry object with incremented retry counters."""
        if self.retry_timeout < datetime.now():
            raise MaxRetryError(_pool, url, error or ResponseError("max_retry_time exceeded"))

        new_retry = super().increment(method, url, response, error, _pool, _stacktrace)

        if response is not None:
            parsed_error = InfluxDBError(response=response)
        elif error is not None:
            parsed_error = error
        else:
            parsed_error = f"Failed request to: {url}"

        message = f"A retryable error occurred during the request. Reason: '{parsed_error}'."
        if isinstance(parsed_error, InfluxDBError):
            message += f" Retry in {parsed_error.retry_after}s."

        if self.retry_callback:
            self.retry_callback(parsed_error)

        logger.warning(message)

        return new_retry

    def _jitter_delay(self):
        return self.jitter_interval * random()

    def _random(self):
        return random()

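The class docstring above spells out the delay ranges; the loop in ``get_backoff_time`` is what produces them. A short sketch that reproduces the same computation for the documented defaults (retry_interval=5, exponential_base=2, max_retry_delay=125), so the ``[5-10, 10-20, 20-40, 40-80, 80-125]`` ranges can be checked by hand:

    def backoff_range(attempt, retry_interval=5, exponential_base=2, max_retry_delay=125):
        # attempt 1 -> (5, 10), attempt 2 -> (10, 20), ... capped at max_retry_delay
        start, stop = retry_interval, retry_interval * exponential_base
        for _ in range(attempt - 1):
            start, stop = stop, stop * exponential_base
            if stop > max_retry_delay:
                break
        return start, min(stop, max_retry_delay)

    print([backoff_range(i) for i in range(1, 6)])
    # [(5, 10), (10, 20), (20, 40), (40, 80), (80, 125)]
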
@@ -0,0 +1,587 @@
"""Collect and write time series data to InfluxDB Cloud or InfluxDB OSS."""

# coding: utf-8
import logging
import os
import warnings
from collections import defaultdict
from datetime import timedelta
from enum import Enum
from random import random
from time import sleep
from typing import Union, Any, Iterable, NamedTuple

import reactivex as rx
from reactivex import operators as ops, Observable
from reactivex.scheduler import ThreadPoolScheduler
from reactivex.subject import Subject

from influxdb_client import WritePrecision
from influxdb_client.client._base import _BaseWriteApi, _HAS_DATACLASS
from influxdb_client.client.util.helpers import get_org_query_param
from influxdb_client.client.write.dataframe_serializer import DataframeSerializer
from influxdb_client.client.write.point import Point, DEFAULT_WRITE_PRECISION
from influxdb_client.client.write.retry import WritesRetry
from influxdb_client.rest import _UTF_8_encoding

logger = logging.getLogger('influxdb_client.client.write_api')


if _HAS_DATACLASS:
    import dataclasses
    from dataclasses import dataclass


class WriteType(Enum):
    """Configuration of which type of writes the client will use."""

    batching = 1
    asynchronous = 2
    synchronous = 3


class WriteOptions(object):
    """Write configuration."""

    def __init__(self, write_type: WriteType = WriteType.batching,
                 batch_size=1_000, flush_interval=1_000,
                 jitter_interval=0,
                 retry_interval=5_000,
                 max_retries=5,
                 max_retry_delay=125_000,
                 max_retry_time=180_000,
                 exponential_base=2,
                 max_close_wait=300_000,
                 write_scheduler=ThreadPoolScheduler(max_workers=1)) -> None:
        """
        Create write api configuration.

        :param write_type: method of writing (batching, asynchronous, synchronous)
        :param batch_size: the number of data points to collect in a batch
        :param flush_interval: flush data at least in this interval (milliseconds)
        :param jitter_interval: this is primarily to avoid large write spikes for users running a large number of
               client instances, i.e. a jitter of 5s and flush duration 10s means flushes will happen every 10-15s
               (milliseconds)
        :param retry_interval: the time to wait before retrying an unsuccessful write (milliseconds)
        :param max_retries: the number of max retries when a write fails, 0 means retry is disabled
        :param max_retry_delay: the maximum delay between each retry attempt in milliseconds
        :param max_retry_time: total timeout for all retry attempts in milliseconds, if 0 retry is disabled
        :param exponential_base: base for the exponential retry delay
        :param max_close_wait: the maximum time to wait for writes to be flushed if close() is called
        :param write_scheduler:
        """
        self.write_type = write_type
        self.batch_size = batch_size
        self.flush_interval = flush_interval
        self.jitter_interval = jitter_interval
        self.retry_interval = retry_interval
        self.max_retries = max_retries
        self.max_retry_delay = max_retry_delay
        self.max_retry_time = max_retry_time
        self.exponential_base = exponential_base
        self.write_scheduler = write_scheduler
        self.max_close_wait = max_close_wait

    def to_retry_strategy(self, **kwargs):
        """
        Create a Retry strategy from write options.

        :key retry_callback: The callable ``callback`` to run after a retryable error occurred.
                             The callable must accept one argument:
                                 - `Exception`: a retryable error
        """
        return WritesRetry(
            total=self.max_retries,
            retry_interval=self.retry_interval / 1_000,
            jitter_interval=self.jitter_interval / 1_000,
            max_retry_delay=self.max_retry_delay / 1_000,
            max_retry_time=self.max_retry_time / 1_000,
            exponential_base=self.exponential_base,
            retry_callback=kwargs.get("retry_callback", None),
            allowed_methods=["POST"])

    def __getstate__(self):
        """Return a dict of attributes that you want to pickle."""
        state = self.__dict__.copy()
        # Remove write scheduler
        del state['write_scheduler']
        return state

    def __setstate__(self, state):
        """Set your object with the provided dict."""
        self.__dict__.update(state)
        # Init default write Scheduler
        self.write_scheduler = ThreadPoolScheduler(max_workers=1)


SYNCHRONOUS = WriteOptions(write_type=WriteType.synchronous)
ASYNCHRONOUS = WriteOptions(write_type=WriteType.asynchronous)

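``SYNCHRONOUS`` and ``ASYNCHRONOUS`` are just preconfigured ``WriteOptions``; batching is the default ``write_type`` and is normally tuned through the constructor parameters documented above. A typical batching configuration might look like the following sketch (the concrete values are illustrative):

    write_options = WriteOptions(batch_size=500,
                                 flush_interval=10_000,
                                 jitter_interval=2_000,
                                 retry_interval=5_000,
                                 max_retries=5,
                                 max_retry_delay=30_000,
                                 exponential_base=2)
    # Typically passed to the client factory shown later in this module:
    # write_api = client.write_api(write_options=write_options)
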
class PointSettings(object):
|
||||
"""Settings to store default tags."""
|
||||
|
||||
def __init__(self, **default_tags) -> None:
|
||||
"""
|
||||
Create point settings for write api.
|
||||
|
||||
:param default_tags: Default tags which will be added to each point written by api.
|
||||
"""
|
||||
self.defaultTags = dict()
|
||||
|
||||
for key, val in default_tags.items():
|
||||
self.add_default_tag(key, val)
|
||||
|
||||
@staticmethod
|
||||
def _get_value(value):
|
||||
|
||||
if value.startswith("${env."):
|
||||
return os.environ.get(value[6:-1])
|
||||
|
||||
return value
|
||||
|
||||
def add_default_tag(self, key, value) -> None:
|
||||
"""Add new default tag with key and value."""
|
||||
self.defaultTags[key] = self._get_value(value)
|
||||
|
||||
|
||||
class _BatchItemKey(object):
|
||||
def __init__(self, bucket, org, precision=DEFAULT_WRITE_PRECISION) -> None:
|
||||
self.bucket = bucket
|
||||
self.org = org
|
||||
self.precision = precision
|
||||
pass
|
||||
|
||||
def __hash__(self) -> int:
|
||||
return hash((self.bucket, self.org, self.precision))
|
||||
|
||||
def __eq__(self, o: object) -> bool:
|
||||
return isinstance(o, self.__class__) \
|
||||
and self.bucket == o.bucket and self.org == o.org and self.precision == o.precision
|
||||
|
||||
def __str__(self) -> str:
|
||||
return '_BatchItemKey[bucket:\'{}\', org:\'{}\', precision:\'{}\']' \
|
||||
.format(str(self.bucket), str(self.org), str(self.precision))
|
||||
|
||||
|
||||
class _BatchItem(object):
|
||||
def __init__(self, key: _BatchItemKey, data, size=1) -> None:
|
||||
self.key = key
|
||||
self.data = data
|
||||
self.size = size
|
||||
pass
|
||||
|
||||
def to_key_tuple(self) -> (str, str, str):
|
||||
return self.key.bucket, self.key.org, self.key.precision
|
||||
|
||||
def __str__(self) -> str:
|
||||
return '_BatchItem[key:\'{}\', size: \'{}\']' \
|
||||
.format(str(self.key), str(self.size))
|
||||
|
||||
|
||||
class _BatchResponse(object):
|
||||
def __init__(self, data: _BatchItem, exception: Exception = None):
|
||||
self.data = data
|
||||
self.exception = exception
|
||||
pass
|
||||
|
||||
def __str__(self) -> str:
|
||||
return '_BatchResponse[status:\'{}\', \'{}\']' \
|
||||
.format("failed" if self.exception else "success", str(self.data))
|
||||
|
||||
|
||||
def _body_reduce(batch_items):
|
||||
return b'\n'.join(map(lambda batch_item: batch_item.data, batch_items))
|
||||
|
||||
|
||||
class WriteApi(_BaseWriteApi):
|
||||
"""
|
||||
Implementation for '/api/v2/write' endpoint.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from influxdb_client import InfluxDBClient
|
||||
from influxdb_client.client.write_api import SYNCHRONOUS
|
||||
|
||||
|
||||
# Initialize SYNCHRONOUS instance of WriteApi
|
||||
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
|
||||
write_api = client.write_api(write_options=SYNCHRONOUS)
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
influxdb_client,
|
||||
write_options: WriteOptions = WriteOptions(),
|
||||
point_settings: PointSettings = PointSettings(),
|
||||
**kwargs) -> None:
|
||||
"""
|
||||
Initialize defaults.
|
||||
|
||||
:param influxdb_client: with default settings (organization)
|
||||
:param write_options: write api configuration
|
||||
:param point_settings: settings to store default tags.
|
||||
:key success_callback: The callable ``callback`` to run after successfully writen a batch.
|
||||
|
||||
The callable must accept two arguments:
|
||||
- `Tuple`: ``(bucket, organization, precision)``
|
||||
- `str`: written data
|
||||
|
||||
**[batching mode]**
|
||||
:key error_callback: The callable ``callback`` to run after unsuccessfully writen a batch.
|
||||
|
||||
The callable must accept three arguments:
|
||||
- `Tuple`: ``(bucket, organization, precision)``
|
||||
- `str`: written data
|
||||
- `Exception`: an occurred error
|
||||
|
||||
**[batching mode]**
|
||||
:key retry_callback: The callable ``callback`` to run after retryable error occurred.
|
||||
|
||||
The callable must accept three arguments:
|
||||
- `Tuple`: ``(bucket, organization, precision)``
|
||||
- `str`: written data
|
||||
- `Exception`: an retryable error
|
||||
|
||||
**[batching mode]**
|
||||
"""
|
||||
super().__init__(influxdb_client=influxdb_client, point_settings=point_settings)
|
||||
self._write_options = write_options
|
||||
self._success_callback = kwargs.get('success_callback', None)
|
||||
self._error_callback = kwargs.get('error_callback', None)
|
||||
self._retry_callback = kwargs.get('retry_callback', None)
|
||||
self._window_scheduler = None
|
||||
|
||||
if self._write_options.write_type is WriteType.batching:
|
||||
# Define Subject that listen incoming data and produces writes into InfluxDB
|
||||
self._subject = Subject()
|
||||
|
||||
self._window_scheduler = ThreadPoolScheduler(1)
|
||||
self._disposable = self._subject.pipe(
|
||||
# Split incoming data to windows by batch_size or flush_interval
|
||||
ops.window_with_time_or_count(count=write_options.batch_size,
|
||||
timespan=timedelta(milliseconds=write_options.flush_interval),
|
||||
scheduler=self._window_scheduler),
|
||||
# Map window into groups defined by 'organization', 'bucket' and 'precision'
|
||||
ops.flat_map(lambda window: window.pipe(
|
||||
# Group window by 'organization', 'bucket' and 'precision'
|
||||
ops.group_by(lambda batch_item: batch_item.key),
|
||||
# Create batch (concatenation line protocols by \n)
|
||||
ops.map(lambda group: group.pipe(
|
||||
ops.to_iterable(),
|
||||
ops.map(lambda xs: _BatchItem(key=group.key, data=_body_reduce(xs), size=len(xs))))),
|
||||
ops.merge_all())),
|
||||
# Write data into InfluxDB (possibility to retry if its fail)
|
||||
ops.filter(lambda batch: batch.size > 0),
|
||||
ops.map(mapper=lambda batch: self._to_response(data=batch, delay=self._jitter_delay())),
|
||||
ops.merge_all()) \
|
||||
.subscribe(self._on_next, self._on_error, self._on_complete)
|
||||
|
||||
else:
|
||||
self._subject = None
|
||||
self._disposable = None
|
||||
|
||||
if self._write_options.write_type is WriteType.asynchronous:
|
||||
message = """The 'WriteType.asynchronous' is deprecated and will be removed in future major version.
|
||||
|
||||
You can use native asynchronous version of the client:
|
||||
- https://influxdb-client.readthedocs.io/en/stable/usage.html#how-to-use-asyncio
|
||||
"""
|
||||
warnings.warn(message, DeprecationWarning)
|
||||
|
||||
def write(self, bucket: str, org: str = None,
|
||||
record: Union[
|
||||
str, Iterable['str'], Point, Iterable['Point'], dict, Iterable['dict'], bytes, Iterable['bytes'],
|
||||
Observable, NamedTuple, Iterable['NamedTuple'], 'dataclass', Iterable['dataclass']
|
||||
] = None,
|
||||
write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, **kwargs) -> Any:
|
||||
"""
|
||||
Write time-series data into InfluxDB.
|
||||
|
||||
:param str bucket: specifies the destination bucket for writes (required)
|
||||
:param str, Organization org: specifies the destination organization for writes;
|
||||
take the ID, Name or Organization.
|
||||
If not specified the default value from ``InfluxDBClient.org`` is used.
|
||||
:param WritePrecision write_precision: specifies the precision for the unix timestamps within
|
||||
the body line-protocol. The precision specified on a Point has precedes
|
||||
and is use for write.
|
||||
:param record: Point, Line Protocol, Dictionary, NamedTuple, Data Classes, Pandas DataFrame or
|
||||
RxPY Observable to write
|
||||
:key data_frame_measurement_name: name of measurement for writing Pandas DataFrame - ``DataFrame``
|
||||
:key data_frame_tag_columns: list of DataFrame columns which are tags,
|
||||
rest columns will be fields - ``DataFrame``
|
||||
:key data_frame_timestamp_column: name of DataFrame column which contains a timestamp. The column can be defined as a :class:`~str` value
|
||||
formatted as `2018-10-26`, `2018-10-26 12:00`, `2018-10-26 12:00:00-05:00`
|
||||
or other formats and types supported by `pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_ - ``DataFrame``
|
||||
:key data_frame_timestamp_timezone: name of the timezone which is used for timestamp column - ``DataFrame``
|
||||
:key record_measurement_key: key of record with specified measurement -
|
||||
``dictionary``, ``NamedTuple``, ``dataclass``
|
||||
:key record_measurement_name: static measurement name - ``dictionary``, ``NamedTuple``, ``dataclass``
|
||||
:key record_time_key: key of record with specified timestamp - ``dictionary``, ``NamedTuple``, ``dataclass``
|
||||
:key record_tag_keys: list of record keys to use as a tag - ``dictionary``, ``NamedTuple``, ``dataclass``
|
||||
:key record_field_keys: list of record keys to use as a field - ``dictionary``, ``NamedTuple``, ``dataclass``
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
# Record as Line Protocol
|
||||
write_api.write("my-bucket", "my-org", "h2o_feet,location=us-west level=125i 1")
|
||||
|
||||
# Record as Dictionary
|
||||
dictionary = {
|
||||
"measurement": "h2o_feet",
|
||||
"tags": {"location": "us-west"},
|
||||
"fields": {"level": 125},
|
||||
"time": 1
|
||||
}
|
||||
write_api.write("my-bucket", "my-org", dictionary)
|
||||
|
||||
# Record as Point
|
||||
from influxdb_client import Point
|
||||
point = Point("h2o_feet").tag("location", "us-west").field("level", 125).time(1)
|
||||
write_api.write("my-bucket", "my-org", point)
|
||||
|
||||
DataFrame:
|
||||
If the ``data_frame_timestamp_column`` is not specified the index of `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
|
||||
is used as a ``timestamp`` for written data. The index can be `PeriodIndex <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.PeriodIndex.html#pandas.PeriodIndex>`_
|
||||
or its must be transformable to ``datetime`` by
|
||||
`pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_.
|
||||
|
||||
If you would like to transform a column to ``PeriodIndex``, you can use something like:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import pandas as pd
|
||||
|
||||
# DataFrame
|
||||
data_frame = ...
|
||||
# Set column as Index
|
||||
data_frame.set_index('column_name', inplace=True)
|
||||
# Transform index to PeriodIndex
|
||||
data_frame.index = pd.to_datetime(data_frame.index, unit='s')
|
||||
|
||||
""" # noqa: E501
|
||||
org = get_org_query_param(org=org, client=self._influxdb_client)
|
||||
|
||||
self._append_default_tags(record)
|
||||
|
||||
if self._write_options.write_type is WriteType.batching:
|
||||
return self._write_batching(bucket, org, record,
|
||||
write_precision, **kwargs)
|
||||
|
||||
payloads = defaultdict(list)
|
||||
self._serialize(record, write_precision, payloads, **kwargs)
|
||||
|
||||
_async_req = True if self._write_options.write_type == WriteType.asynchronous else False
|
||||
|
||||
def write_payload(payload):
|
||||
final_string = b'\n'.join(payload[1])
|
||||
return self._post_write(_async_req, bucket, org, final_string, payload[0])
|
||||
|
||||
results = list(map(write_payload, payloads.items()))
|
||||
if not _async_req:
|
||||
return None
|
||||
elif len(results) == 1:
|
||||
return results[0]
|
||||
return results
|
||||
|
||||
def flush(self):
|
||||
"""Flush data."""
|
||||
# TODO
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
"""Flush data and dispose a batching buffer."""
|
||||
self.__del__()
|
||||
|
||||
def __enter__(self):
|
||||
"""
|
||||
Enter the runtime context related to this object.
|
||||
|
||||
It will bind this method’s return value to the target(s)
|
||||
specified in the `as` clause of the statement.
|
||||
|
||||
return: self instance
|
||||
"""
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
"""Exit the runtime context related to this object and close the WriteApi."""
|
||||
self.close()
|
||||
|
||||
def __del__(self):
|
||||
"""Close WriteApi."""
|
||||
if self._subject:
|
||||
self._subject.on_completed()
|
||||
self._subject.dispose()
|
||||
self._subject = None
|
||||
|
||||
"""
|
||||
We impose a maximum wait time to ensure that we do not cause a deadlock if the
|
||||
background thread has exited abnormally
|
||||
|
||||
Each iteration waits 100ms, but sleep expects the unit to be seconds so convert
|
||||
the maximum wait time to seconds.
|
||||
|
||||
We keep a counter of how long we've waited
|
||||
"""
|
||||
max_wait_time = self._write_options.max_close_wait / 1000
|
||||
waited = 0
|
||||
sleep_period = 0.1
|
||||
|
||||
# Wait for writing to finish
|
||||
while not self._disposable.is_disposed:
|
||||
sleep(sleep_period)
|
||||
waited += sleep_period
|
||||
|
||||
# Have we reached the upper limit?
|
||||
if waited >= max_wait_time:
|
||||
logger.warning(
|
||||
"Reached max_close_wait (%s seconds) waiting for batches to finish writing. Force closing",
|
||||
max_wait_time
|
||||
)
|
||||
break
|
||||
|
||||
if self._window_scheduler:
|
||||
self._window_scheduler.executor.shutdown(wait=False)
|
||||
self._window_scheduler = None
|
||||
|
||||
if self._disposable:
|
||||
self._disposable = None
|
||||
pass
|
||||
|
||||
def _write_batching(self, bucket, org, data,
|
||||
precision=DEFAULT_WRITE_PRECISION,
|
||||
**kwargs):
|
||||
if isinstance(data, bytes):
|
||||
_key = _BatchItemKey(bucket, org, precision)
|
||||
self._subject.on_next(_BatchItem(key=_key, data=data))
|
||||
|
||||
elif isinstance(data, str):
|
||||
self._write_batching(bucket, org, data.encode(_UTF_8_encoding),
|
||||
precision, **kwargs)
|
||||
|
||||
elif isinstance(data, Point):
|
||||
self._write_batching(bucket, org, data.to_line_protocol(), data.write_precision, **kwargs)
|
||||
|
||||
elif isinstance(data, dict):
|
||||
self._write_batching(bucket, org, Point.from_dict(data, write_precision=precision, **kwargs),
|
||||
precision, **kwargs)
|
||||
|
||||
elif 'DataFrame' in type(data).__name__:
|
||||
serializer = DataframeSerializer(data, self._point_settings, precision, self._write_options.batch_size,
|
||||
**kwargs)
|
||||
for chunk_idx in range(serializer.number_of_chunks):
|
||||
self._write_batching(bucket, org,
|
||||
serializer.serialize(chunk_idx),
|
||||
precision, **kwargs)
|
||||
elif hasattr(data, "_asdict"):
|
||||
# noinspection PyProtectedMember
|
||||
self._write_batching(bucket, org, data._asdict(), precision, **kwargs)
|
||||
|
||||
elif _HAS_DATACLASS and dataclasses.is_dataclass(data):
|
||||
self._write_batching(bucket, org, dataclasses.asdict(data), precision, **kwargs)
|
||||
|
||||
elif isinstance(data, Iterable):
|
||||
for item in data:
|
||||
self._write_batching(bucket, org, item, precision, **kwargs)
|
||||
|
||||
elif isinstance(data, Observable):
|
||||
data.subscribe(lambda it: self._write_batching(bucket, org, it, precision, **kwargs))
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
    def _http(self, batch_item: _BatchItem):
        logger.debug("Write time series data into InfluxDB: %s", batch_item)

        if self._retry_callback:
            def _retry_callback_delegate(exception):
                return self._retry_callback(batch_item.to_key_tuple(), batch_item.data, exception)
        else:
            _retry_callback_delegate = None

        retry = self._write_options.to_retry_strategy(retry_callback=_retry_callback_delegate)

        self._post_write(False, batch_item.key.bucket, batch_item.key.org, batch_item.data,
                         batch_item.key.precision, urlopen_kw={'retries': retry})

        logger.debug("Write request finished %s", batch_item)

        return _BatchResponse(data=batch_item)
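
    # ------------------------------------------------------------------
    # Editor's sketch (not part of the diff): the delegate above forwards retry
    # notifications to a user-supplied callback. A batching write_api can be
    # wired with callbacks roughly like this; the first callback argument is the
    # (bucket, org, precision) tuple produced by batch_item.to_key_tuple() above,
    # the second is the serialized batch.
    #
    #   def on_success(conf, data):
    #       print(f"Batch written for {conf}")
    #
    #   def on_error(conf, data, exception):
    #       print(f"Cannot write batch for {conf}: {exception}")
    #
    #   def on_retry(conf, data, exception):
    #       print(f"Retryable error for {conf}: {exception}")
    #
    #   write_api = client.write_api(write_options=WriteOptions(batch_size=500),
    #                                success_callback=on_success,
    #                                error_callback=on_error,
    #                                retry_callback=on_retry)
    # ------------------------------------------------------------------
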
    def _post_write(self, _async_req, bucket, org, body, precision, **kwargs):
        return self._write_service.post_write(org=org, bucket=bucket, body=body, precision=precision,
                                              async_req=_async_req,
                                              content_type="text/plain; charset=utf-8",
                                              **kwargs)

    def _to_response(self, data: _BatchItem, delay: timedelta):
        return rx.of(data).pipe(
            ops.subscribe_on(self._write_options.write_scheduler),
            # use delay if it's specified
            ops.delay(duetime=delay, scheduler=self._write_options.write_scheduler),
            # invoke http call
            ops.map(lambda x: self._http(x)),
            # catch exception to fail batch response
            ops.catch(handler=lambda exception, source: rx.just(_BatchResponse(exception=exception, data=data))),
        )

    def _jitter_delay(self):
        return timedelta(milliseconds=random() * self._write_options.jitter_interval)
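
    # ------------------------------------------------------------------
    # Editor's sketch (not part of the diff): _jitter_delay() adds a uniform
    # 0..jitter_interval ms delay per batch, and _http() builds its retry
    # strategy from the same options object. Typical knobs, assuming the
    # WriteOptions keyword names used elsewhere in this client; values are
    # examples only.
    #
    #   options = WriteOptions(jitter_interval=2_000,   # up to 2 s of random extra delay per batch
    #                          retry_interval=5_000,    # delay before the first retry, in ms
    #                          max_retries=5,
    #                          max_retry_delay=30_000,
    #                          exponential_base=2)
    # ------------------------------------------------------------------
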
    def _on_next(self, response: _BatchResponse):
        if response.exception:
            logger.error("The batch item wasn't processed successfully because: %s", response.exception)
            if self._error_callback:
                try:
                    self._error_callback(response.data.to_key_tuple(), response.data.data, response.exception)
                except Exception as e:
                    """
                    Unfortunately, because callbacks are user-provided generic code, exceptions can be entirely
                    arbitrary.

                    We trap it, log that it occurred and then proceed - there's not much more that we can
                    really do.
                    """
                    logger.error("The configured error callback threw an exception: %s", e)

        else:
            logger.debug("The batch item: %s was processed successfully.", response)
            if self._success_callback:
                try:
                    self._success_callback(response.data.to_key_tuple(), response.data.data)
                except Exception as e:
                    logger.error("The configured success callback threw an exception: %s", e)

    @staticmethod
    def _on_error(ex):
        logger.error("unexpected error during batching: %s", ex)

    def _on_complete(self):
        self._disposable.dispose()
        logger.info("the batching processor was disposed")

    def __getstate__(self):
        """Return a dict of attributes that you want to pickle."""
        state = self.__dict__.copy()
        # Remove rx
        del state['_subject']
        del state['_disposable']
        del state['_window_scheduler']
        del state['_write_service']
        return state

    def __setstate__(self, state):
        """Set your object with the provided dict."""
        self.__dict__.update(state)
        # Init Rx
        self.__init__(self._influxdb_client,
                      self._write_options,
                      self._point_settings,
                      success_callback=self._success_callback,
                      error_callback=self._error_callback,
                      retry_callback=self._retry_callback)
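
    # ------------------------------------------------------------------
    # Editor's sketch (not part of the diff): because __getstate__ strips the Rx
    # machinery and __setstate__ rebuilds it via __init__, a WriteApi instance
    # should survive a pickle round-trip (useful when it is handed to worker
    # processes). Purely illustrative.
    #
    #   import pickle
    #
    #   restored = pickle.loads(pickle.dumps(write_api))
    #   restored.write("my-bucket", "my-org", "h2o_feet,location=us-west level=125i 1")
    # ------------------------------------------------------------------
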
@@ -0,0 +1,125 @@
"""Collect and async write time series data to InfluxDB Cloud or InfluxDB OSS."""
import logging
from collections import defaultdict
from typing import Union, Iterable, NamedTuple

from influxdb_client import Point, WritePrecision
from influxdb_client.client._base import _BaseWriteApi, _HAS_DATACLASS
from influxdb_client.client.util.helpers import get_org_query_param
from influxdb_client.client.write.point import DEFAULT_WRITE_PRECISION
from influxdb_client.client.write_api import PointSettings

logger = logging.getLogger('influxdb_client.client.write_api_async')

if _HAS_DATACLASS:
    from dataclasses import dataclass


class WriteApiAsync(_BaseWriteApi):
    """
    Implementation for '/api/v2/write' endpoint.

    Example:
        .. code-block:: python

            from influxdb_client_async import InfluxDBClientAsync


            # Initialize async/await instance of Write API
            async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
                write_api = client.write_api()
    """

    def __init__(self, influxdb_client, point_settings: PointSettings = PointSettings()) -> None:
        """
        Initialize defaults.

        :param influxdb_client: client with default settings (organization)
        :param point_settings: settings to store default tags.
        """
        super().__init__(influxdb_client=influxdb_client, point_settings=point_settings)

    async def write(self, bucket: str, org: str = None,
                    record: Union[str, Iterable['str'], Point, Iterable['Point'], dict, Iterable['dict'], bytes,
                                  Iterable['bytes'], NamedTuple, Iterable['NamedTuple'], 'dataclass',
                                  Iterable['dataclass']] = None,
                    write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, **kwargs) -> bool:
        """
        Write time-series data into InfluxDB.

        :param str bucket: specifies the destination bucket for writes (required)
        :param str, Organization org: specifies the destination organization for writes;
                                      takes the ID, Name or Organization object.
                                      If not specified, the default value from ``InfluxDBClientAsync.org`` is used.
        :param WritePrecision write_precision: specifies the precision for the unix timestamps within
                                               the body line-protocol. The precision specified on a Point takes
                                               precedence and is used for the write.
        :param record: Point, Line Protocol, Dictionary, NamedTuple, Data Classes, Pandas DataFrame
        :key data_frame_measurement_name: name of measurement for writing Pandas DataFrame - ``DataFrame``
        :key data_frame_tag_columns: list of DataFrame columns which are tags,
                                     remaining columns will be fields - ``DataFrame``
        :key data_frame_timestamp_column: name of DataFrame column which contains a timestamp. The column can be defined as a :class:`~str` value
                                          formatted as `2018-10-26`, `2018-10-26 12:00`, `2018-10-26 12:00:00-05:00`
                                          or other formats and types supported by `pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_ - ``DataFrame``
        :key data_frame_timestamp_timezone: name of the timezone used for the timestamp column - ``DataFrame``
        :key record_measurement_key: key of record with specified measurement -
                                     ``dictionary``, ``NamedTuple``, ``dataclass``
        :key record_measurement_name: static measurement name - ``dictionary``, ``NamedTuple``, ``dataclass``
        :key record_time_key: key of record with specified timestamp - ``dictionary``, ``NamedTuple``, ``dataclass``
        :key record_tag_keys: list of record keys to use as a tag - ``dictionary``, ``NamedTuple``, ``dataclass``
        :key record_field_keys: list of record keys to use as a field - ``dictionary``, ``NamedTuple``, ``dataclass``
        :return: ``True`` if the data was successfully accepted, otherwise an exception is raised

        Example:
            .. code-block:: python

                # Record as Line Protocol
                await write_api.write("my-bucket", "my-org", "h2o_feet,location=us-west level=125i 1")

                # Record as Dictionary
                dictionary = {
                    "measurement": "h2o_feet",
                    "tags": {"location": "us-west"},
                    "fields": {"level": 125},
                    "time": 1
                }
                await write_api.write("my-bucket", "my-org", dictionary)

                # Record as Point
                from influxdb_client import Point
                point = Point("h2o_feet").tag("location", "us-west").field("level", 125).time(1)
                await write_api.write("my-bucket", "my-org", point)

        DataFrame:
            If ``data_frame_timestamp_column`` is not specified, the index of the `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
            is used as the ``timestamp`` for the written data. The index can be a `PeriodIndex <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.PeriodIndex.html#pandas.PeriodIndex>`_
            or it must be transformable to ``datetime`` by
            `pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_.

            If you would like to use one of the columns as the timestamp index, you can use something like:

            .. code-block:: python

                import pandas as pd

                # DataFrame
                data_frame = ...
                # Set column as Index
                data_frame.set_index('column_name', inplace=True)
                # Transform index to datetime
                data_frame.index = pd.to_datetime(data_frame.index, unit='s')

        """ # noqa: E501
        org = get_org_query_param(org=org, client=self._influxdb_client)
        self._append_default_tags(record)

        payloads = defaultdict(list)
        self._serialize(record, write_precision, payloads, precision_from_point=False, **kwargs)

        # join the serialized lines by '\n'
        body = b'\n'.join(payloads[write_precision])
        response = await self._write_service.post_write_async(org=org, bucket=bucket, body=body,
                                                              precision=write_precision, async_req=False,
                                                              _return_http_data_only=False,
                                                              content_type="text/plain; charset=utf-8")
        return response[1] in (201, 204)
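
# ------------------------------------------------------------------
# Editor's sketch (not part of the diff): the coroutine above returns True when
# the server answers 201/204, and the generated service raises for error
# statuses. A minimal call site, assuming the async client import shown in the
# class docstring and a server at localhost:
#
#   import asyncio
#   from influxdb_client_async import InfluxDBClientAsync
#
#   async def main():
#       async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
#           accepted = await client.write_api().write(bucket="my-bucket",
#                                                     record="h2o_feet,location=us-west level=125i 1")
#           print(f"accepted: {accepted}")
#
#   asyncio.run(main())
# ------------------------------------------------------------------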