venv added, updated

Norbert
2024-09-13 09:46:28 +02:00
parent 577596d9f3
commit 82af8c809a
4812 changed files with 640223 additions and 2 deletions

influxdb_client/__init__.py

@@ -0,0 +1,396 @@
# coding: utf-8
# flake8: noqa
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import apis into sdk package
from influxdb_client.service.authorizations_service import AuthorizationsService
from influxdb_client.service.backup_service import BackupService
from influxdb_client.service.bucket_schemas_service import BucketSchemasService
from influxdb_client.service.buckets_service import BucketsService
from influxdb_client.service.cells_service import CellsService
from influxdb_client.service.checks_service import ChecksService
from influxdb_client.service.config_service import ConfigService
from influxdb_client.service.dbr_ps_service import DBRPsService
from influxdb_client.service.dashboards_service import DashboardsService
from influxdb_client.service.delete_service import DeleteService
from influxdb_client.service.health_service import HealthService
from influxdb_client.service.invokable_scripts_service import InvokableScriptsService
from influxdb_client.service.labels_service import LabelsService
from influxdb_client.service.legacy_authorizations_service import LegacyAuthorizationsService
from influxdb_client.service.metrics_service import MetricsService
from influxdb_client.service.notification_endpoints_service import NotificationEndpointsService
from influxdb_client.service.notification_rules_service import NotificationRulesService
from influxdb_client.service.organizations_service import OrganizationsService
from influxdb_client.service.ping_service import PingService
from influxdb_client.service.query_service import QueryService
from influxdb_client.service.ready_service import ReadyService
from influxdb_client.service.remote_connections_service import RemoteConnectionsService
from influxdb_client.service.replications_service import ReplicationsService
from influxdb_client.service.resources_service import ResourcesService
from influxdb_client.service.restore_service import RestoreService
from influxdb_client.service.routes_service import RoutesService
from influxdb_client.service.rules_service import RulesService
from influxdb_client.service.scraper_targets_service import ScraperTargetsService
from influxdb_client.service.secrets_service import SecretsService
from influxdb_client.service.setup_service import SetupService
from influxdb_client.service.signin_service import SigninService
from influxdb_client.service.signout_service import SignoutService
from influxdb_client.service.sources_service import SourcesService
from influxdb_client.service.tasks_service import TasksService
from influxdb_client.service.telegraf_plugins_service import TelegrafPluginsService
from influxdb_client.service.telegrafs_service import TelegrafsService
from influxdb_client.service.templates_service import TemplatesService
from influxdb_client.service.users_service import UsersService
from influxdb_client.service.variables_service import VariablesService
from influxdb_client.service.views_service import ViewsService
from influxdb_client.service.write_service import WriteService
from influxdb_client.configuration import Configuration
# import models into sdk package
from influxdb_client.domain.ast_response import ASTResponse
from influxdb_client.domain.add_resource_member_request_body import AddResourceMemberRequestBody
from influxdb_client.domain.analyze_query_response import AnalyzeQueryResponse
from influxdb_client.domain.analyze_query_response_errors import AnalyzeQueryResponseErrors
from influxdb_client.domain.array_expression import ArrayExpression
from influxdb_client.domain.authorization import Authorization
from influxdb_client.domain.authorization_post_request import AuthorizationPostRequest
from influxdb_client.domain.authorization_update_request import AuthorizationUpdateRequest
from influxdb_client.domain.authorizations import Authorizations
from influxdb_client.domain.axes import Axes
from influxdb_client.domain.axis import Axis
from influxdb_client.domain.axis_scale import AxisScale
from influxdb_client.domain.bad_statement import BadStatement
from influxdb_client.domain.band_view_properties import BandViewProperties
from influxdb_client.domain.binary_expression import BinaryExpression
from influxdb_client.domain.block import Block
from influxdb_client.domain.boolean_literal import BooleanLiteral
from influxdb_client.domain.bucket import Bucket
from influxdb_client.domain.bucket_links import BucketLinks
from influxdb_client.domain.bucket_metadata_manifest import BucketMetadataManifest
from influxdb_client.domain.bucket_retention_rules import BucketRetentionRules
from influxdb_client.domain.bucket_shard_mapping import BucketShardMapping
from influxdb_client.domain.buckets import Buckets
from influxdb_client.domain.builder_aggregate_function_type import BuilderAggregateFunctionType
from influxdb_client.domain.builder_config import BuilderConfig
from influxdb_client.domain.builder_config_aggregate_window import BuilderConfigAggregateWindow
from influxdb_client.domain.builder_functions_type import BuilderFunctionsType
from influxdb_client.domain.builder_tags_type import BuilderTagsType
from influxdb_client.domain.builtin_statement import BuiltinStatement
from influxdb_client.domain.call_expression import CallExpression
from influxdb_client.domain.cell import Cell
from influxdb_client.domain.cell_links import CellLinks
from influxdb_client.domain.cell_update import CellUpdate
from influxdb_client.domain.cell_with_view_properties import CellWithViewProperties
from influxdb_client.domain.check import Check
from influxdb_client.domain.check_base import CheckBase
from influxdb_client.domain.check_base_links import CheckBaseLinks
from influxdb_client.domain.check_discriminator import CheckDiscriminator
from influxdb_client.domain.check_patch import CheckPatch
from influxdb_client.domain.check_status_level import CheckStatusLevel
from influxdb_client.domain.check_view_properties import CheckViewProperties
from influxdb_client.domain.checks import Checks
from influxdb_client.domain.column_data_type import ColumnDataType
from influxdb_client.domain.column_semantic_type import ColumnSemanticType
from influxdb_client.domain.conditional_expression import ConditionalExpression
from influxdb_client.domain.config import Config
from influxdb_client.domain.constant_variable_properties import ConstantVariableProperties
from influxdb_client.domain.create_cell import CreateCell
from influxdb_client.domain.create_dashboard_request import CreateDashboardRequest
from influxdb_client.domain.custom_check import CustomCheck
from influxdb_client.domain.dbrp import DBRP
from influxdb_client.domain.dbrp_create import DBRPCreate
from influxdb_client.domain.dbrp_get import DBRPGet
from influxdb_client.domain.dbrp_update import DBRPUpdate
from influxdb_client.domain.dbr_ps import DBRPs
from influxdb_client.domain.dashboard import Dashboard
from influxdb_client.domain.dashboard_color import DashboardColor
from influxdb_client.domain.dashboard_query import DashboardQuery
from influxdb_client.domain.dashboard_with_view_properties import DashboardWithViewProperties
from influxdb_client.domain.dashboards import Dashboards
from influxdb_client.domain.date_time_literal import DateTimeLiteral
from influxdb_client.domain.deadman_check import DeadmanCheck
from influxdb_client.domain.decimal_places import DecimalPlaces
from influxdb_client.domain.delete_predicate_request import DeletePredicateRequest
from influxdb_client.domain.dialect import Dialect
from influxdb_client.domain.dict_expression import DictExpression
from influxdb_client.domain.dict_item import DictItem
from influxdb_client.domain.duration import Duration
from influxdb_client.domain.duration_literal import DurationLiteral
from influxdb_client.domain.error import Error
from influxdb_client.domain.expression import Expression
from influxdb_client.domain.expression_statement import ExpressionStatement
from influxdb_client.domain.field import Field
from influxdb_client.domain.file import File
from influxdb_client.domain.float_literal import FloatLiteral
from influxdb_client.domain.flux_response import FluxResponse
from influxdb_client.domain.flux_suggestion import FluxSuggestion
from influxdb_client.domain.flux_suggestions import FluxSuggestions
from influxdb_client.domain.function_expression import FunctionExpression
from influxdb_client.domain.gauge_view_properties import GaugeViewProperties
from influxdb_client.domain.greater_threshold import GreaterThreshold
from influxdb_client.domain.http_notification_endpoint import HTTPNotificationEndpoint
from influxdb_client.domain.http_notification_rule import HTTPNotificationRule
from influxdb_client.domain.http_notification_rule_base import HTTPNotificationRuleBase
from influxdb_client.domain.health_check import HealthCheck
from influxdb_client.domain.heatmap_view_properties import HeatmapViewProperties
from influxdb_client.domain.histogram_view_properties import HistogramViewProperties
from influxdb_client.domain.identifier import Identifier
from influxdb_client.domain.import_declaration import ImportDeclaration
from influxdb_client.domain.index_expression import IndexExpression
from influxdb_client.domain.integer_literal import IntegerLiteral
from influxdb_client.domain.is_onboarding import IsOnboarding
from influxdb_client.domain.label import Label
from influxdb_client.domain.label_create_request import LabelCreateRequest
from influxdb_client.domain.label_mapping import LabelMapping
from influxdb_client.domain.label_response import LabelResponse
from influxdb_client.domain.label_update import LabelUpdate
from influxdb_client.domain.labels_response import LabelsResponse
from influxdb_client.domain.language_request import LanguageRequest
from influxdb_client.domain.legacy_authorization_post_request import LegacyAuthorizationPostRequest
from influxdb_client.domain.lesser_threshold import LesserThreshold
from influxdb_client.domain.line_plus_single_stat_properties import LinePlusSingleStatProperties
from influxdb_client.domain.line_protocol_error import LineProtocolError
from influxdb_client.domain.line_protocol_length_error import LineProtocolLengthError
from influxdb_client.domain.links import Links
from influxdb_client.domain.list_stacks_response import ListStacksResponse
from influxdb_client.domain.log_event import LogEvent
from influxdb_client.domain.logical_expression import LogicalExpression
from influxdb_client.domain.logs import Logs
from influxdb_client.domain.map_variable_properties import MapVariableProperties
from influxdb_client.domain.markdown_view_properties import MarkdownViewProperties
from influxdb_client.domain.measurement_schema import MeasurementSchema
from influxdb_client.domain.measurement_schema_column import MeasurementSchemaColumn
from influxdb_client.domain.measurement_schema_create_request import MeasurementSchemaCreateRequest
from influxdb_client.domain.measurement_schema_list import MeasurementSchemaList
from influxdb_client.domain.measurement_schema_update_request import MeasurementSchemaUpdateRequest
from influxdb_client.domain.member_assignment import MemberAssignment
from influxdb_client.domain.member_expression import MemberExpression
from influxdb_client.domain.metadata_backup import MetadataBackup
from influxdb_client.domain.model_property import ModelProperty
from influxdb_client.domain.mosaic_view_properties import MosaicViewProperties
from influxdb_client.domain.node import Node
from influxdb_client.domain.notification_endpoint import NotificationEndpoint
from influxdb_client.domain.notification_endpoint_base import NotificationEndpointBase
from influxdb_client.domain.notification_endpoint_base_links import NotificationEndpointBaseLinks
from influxdb_client.domain.notification_endpoint_discriminator import NotificationEndpointDiscriminator
from influxdb_client.domain.notification_endpoint_type import NotificationEndpointType
from influxdb_client.domain.notification_endpoint_update import NotificationEndpointUpdate
from influxdb_client.domain.notification_endpoints import NotificationEndpoints
from influxdb_client.domain.notification_rule import NotificationRule
from influxdb_client.domain.notification_rule_base import NotificationRuleBase
from influxdb_client.domain.notification_rule_base_links import NotificationRuleBaseLinks
from influxdb_client.domain.notification_rule_discriminator import NotificationRuleDiscriminator
from influxdb_client.domain.notification_rule_update import NotificationRuleUpdate
from influxdb_client.domain.notification_rules import NotificationRules
from influxdb_client.domain.object_expression import ObjectExpression
from influxdb_client.domain.onboarding_request import OnboardingRequest
from influxdb_client.domain.onboarding_response import OnboardingResponse
from influxdb_client.domain.option_statement import OptionStatement
from influxdb_client.domain.organization import Organization
from influxdb_client.domain.organization_links import OrganizationLinks
from influxdb_client.domain.organizations import Organizations
from influxdb_client.domain.package import Package
from influxdb_client.domain.package_clause import PackageClause
from influxdb_client.domain.pager_duty_notification_endpoint import PagerDutyNotificationEndpoint
from influxdb_client.domain.pager_duty_notification_rule import PagerDutyNotificationRule
from influxdb_client.domain.pager_duty_notification_rule_base import PagerDutyNotificationRuleBase
from influxdb_client.domain.paren_expression import ParenExpression
from influxdb_client.domain.password_reset_body import PasswordResetBody
from influxdb_client.domain.patch_bucket_request import PatchBucketRequest
from influxdb_client.domain.patch_dashboard_request import PatchDashboardRequest
from influxdb_client.domain.patch_organization_request import PatchOrganizationRequest
from influxdb_client.domain.patch_retention_rule import PatchRetentionRule
from influxdb_client.domain.patch_stack_request import PatchStackRequest
from influxdb_client.domain.patch_stack_request_additional_resources import PatchStackRequestAdditionalResources
from influxdb_client.domain.permission import Permission
from influxdb_client.domain.permission_resource import PermissionResource
from influxdb_client.domain.pipe_expression import PipeExpression
from influxdb_client.domain.pipe_literal import PipeLiteral
from influxdb_client.domain.post_bucket_request import PostBucketRequest
from influxdb_client.domain.post_check import PostCheck
from influxdb_client.domain.post_notification_endpoint import PostNotificationEndpoint
from influxdb_client.domain.post_notification_rule import PostNotificationRule
from influxdb_client.domain.post_organization_request import PostOrganizationRequest
from influxdb_client.domain.post_restore_kv_response import PostRestoreKVResponse
from influxdb_client.domain.post_stack_request import PostStackRequest
from influxdb_client.domain.property_key import PropertyKey
from influxdb_client.domain.query import Query
from influxdb_client.domain.query_edit_mode import QueryEditMode
from influxdb_client.domain.query_variable_properties import QueryVariableProperties
from influxdb_client.domain.query_variable_properties_values import QueryVariablePropertiesValues
from influxdb_client.domain.range_threshold import RangeThreshold
from influxdb_client.domain.ready import Ready
from influxdb_client.domain.regexp_literal import RegexpLiteral
from influxdb_client.domain.remote_connection import RemoteConnection
from influxdb_client.domain.remote_connection_creation_request import RemoteConnectionCreationRequest
from influxdb_client.domain.remote_connection_update_request import RemoteConnectionUpdateRequest
from influxdb_client.domain.remote_connections import RemoteConnections
from influxdb_client.domain.renamable_field import RenamableField
from influxdb_client.domain.replication import Replication
from influxdb_client.domain.replication_creation_request import ReplicationCreationRequest
from influxdb_client.domain.replication_update_request import ReplicationUpdateRequest
from influxdb_client.domain.replications import Replications
from influxdb_client.domain.resource_member import ResourceMember
from influxdb_client.domain.resource_members import ResourceMembers
from influxdb_client.domain.resource_members_links import ResourceMembersLinks
from influxdb_client.domain.resource_owner import ResourceOwner
from influxdb_client.domain.resource_owners import ResourceOwners
from influxdb_client.domain.restored_bucket_mappings import RestoredBucketMappings
from influxdb_client.domain.retention_policy_manifest import RetentionPolicyManifest
from influxdb_client.domain.return_statement import ReturnStatement
from influxdb_client.domain.routes import Routes
from influxdb_client.domain.routes_external import RoutesExternal
from influxdb_client.domain.routes_query import RoutesQuery
from influxdb_client.domain.routes_system import RoutesSystem
from influxdb_client.domain.rule_status_level import RuleStatusLevel
from influxdb_client.domain.run import Run
from influxdb_client.domain.run_links import RunLinks
from influxdb_client.domain.run_manually import RunManually
from influxdb_client.domain.runs import Runs
from influxdb_client.domain.smtp_notification_rule import SMTPNotificationRule
from influxdb_client.domain.smtp_notification_rule_base import SMTPNotificationRuleBase
from influxdb_client.domain.scatter_view_properties import ScatterViewProperties
from influxdb_client.domain.schema_type import SchemaType
from influxdb_client.domain.scraper_target_request import ScraperTargetRequest
from influxdb_client.domain.scraper_target_response import ScraperTargetResponse
from influxdb_client.domain.scraper_target_responses import ScraperTargetResponses
from influxdb_client.domain.script import Script
from influxdb_client.domain.script_create_request import ScriptCreateRequest
from influxdb_client.domain.script_invocation_params import ScriptInvocationParams
from influxdb_client.domain.script_language import ScriptLanguage
from influxdb_client.domain.script_update_request import ScriptUpdateRequest
from influxdb_client.domain.scripts import Scripts
from influxdb_client.domain.secret_keys import SecretKeys
from influxdb_client.domain.secret_keys_response import SecretKeysResponse
from influxdb_client.domain.shard_group_manifest import ShardGroupManifest
from influxdb_client.domain.shard_manifest import ShardManifest
from influxdb_client.domain.shard_owner import ShardOwner
from influxdb_client.domain.simple_table_view_properties import SimpleTableViewProperties
from influxdb_client.domain.single_stat_view_properties import SingleStatViewProperties
from influxdb_client.domain.slack_notification_endpoint import SlackNotificationEndpoint
from influxdb_client.domain.slack_notification_rule import SlackNotificationRule
from influxdb_client.domain.slack_notification_rule_base import SlackNotificationRuleBase
from influxdb_client.domain.source import Source
from influxdb_client.domain.source_links import SourceLinks
from influxdb_client.domain.sources import Sources
from influxdb_client.domain.stack import Stack
from influxdb_client.domain.stack_associations import StackAssociations
from influxdb_client.domain.stack_events import StackEvents
from influxdb_client.domain.stack_links import StackLinks
from influxdb_client.domain.stack_resources import StackResources
from influxdb_client.domain.statement import Statement
from influxdb_client.domain.static_legend import StaticLegend
from influxdb_client.domain.status_rule import StatusRule
from influxdb_client.domain.string_literal import StringLiteral
from influxdb_client.domain.subscription_manifest import SubscriptionManifest
from influxdb_client.domain.table_view_properties import TableViewProperties
from influxdb_client.domain.table_view_properties_table_options import TableViewPropertiesTableOptions
from influxdb_client.domain.tag_rule import TagRule
from influxdb_client.domain.task import Task
from influxdb_client.domain.task_create_request import TaskCreateRequest
from influxdb_client.domain.task_links import TaskLinks
from influxdb_client.domain.task_status_type import TaskStatusType
from influxdb_client.domain.task_update_request import TaskUpdateRequest
from influxdb_client.domain.tasks import Tasks
from influxdb_client.domain.telegraf import Telegraf
from influxdb_client.domain.telegraf_plugin import TelegrafPlugin
from influxdb_client.domain.telegraf_plugin_request import TelegrafPluginRequest
from influxdb_client.domain.telegraf_plugin_request_plugins import TelegrafPluginRequestPlugins
from influxdb_client.domain.telegraf_plugins import TelegrafPlugins
from influxdb_client.domain.telegraf_request import TelegrafRequest
from influxdb_client.domain.telegraf_request_metadata import TelegrafRequestMetadata
from influxdb_client.domain.telegrafs import Telegrafs
from influxdb_client.domain.telegram_notification_endpoint import TelegramNotificationEndpoint
from influxdb_client.domain.telegram_notification_rule import TelegramNotificationRule
from influxdb_client.domain.telegram_notification_rule_base import TelegramNotificationRuleBase
from influxdb_client.domain.template_apply import TemplateApply
from influxdb_client.domain.template_apply_remotes import TemplateApplyRemotes
from influxdb_client.domain.template_apply_template import TemplateApplyTemplate
from influxdb_client.domain.template_chart import TemplateChart
from influxdb_client.domain.template_export_by_id import TemplateExportByID
from influxdb_client.domain.template_export_by_id_org_ids import TemplateExportByIDOrgIDs
from influxdb_client.domain.template_export_by_id_resource_filters import TemplateExportByIDResourceFilters
from influxdb_client.domain.template_export_by_id_resources import TemplateExportByIDResources
from influxdb_client.domain.template_kind import TemplateKind
from influxdb_client.domain.template_summary import TemplateSummary
from influxdb_client.domain.template_summary_diff import TemplateSummaryDiff
from influxdb_client.domain.template_summary_diff_buckets import TemplateSummaryDiffBuckets
from influxdb_client.domain.template_summary_diff_buckets_new_old import TemplateSummaryDiffBucketsNewOld
from influxdb_client.domain.template_summary_diff_checks import TemplateSummaryDiffChecks
from influxdb_client.domain.template_summary_diff_dashboards import TemplateSummaryDiffDashboards
from influxdb_client.domain.template_summary_diff_dashboards_new_old import TemplateSummaryDiffDashboardsNewOld
from influxdb_client.domain.template_summary_diff_label_mappings import TemplateSummaryDiffLabelMappings
from influxdb_client.domain.template_summary_diff_labels import TemplateSummaryDiffLabels
from influxdb_client.domain.template_summary_diff_labels_new_old import TemplateSummaryDiffLabelsNewOld
from influxdb_client.domain.template_summary_diff_notification_endpoints import TemplateSummaryDiffNotificationEndpoints
from influxdb_client.domain.template_summary_diff_notification_rules import TemplateSummaryDiffNotificationRules
from influxdb_client.domain.template_summary_diff_notification_rules_new_old import TemplateSummaryDiffNotificationRulesNewOld
from influxdb_client.domain.template_summary_diff_tasks import TemplateSummaryDiffTasks
from influxdb_client.domain.template_summary_diff_tasks_new_old import TemplateSummaryDiffTasksNewOld
from influxdb_client.domain.template_summary_diff_telegraf_configs import TemplateSummaryDiffTelegrafConfigs
from influxdb_client.domain.template_summary_diff_variables import TemplateSummaryDiffVariables
from influxdb_client.domain.template_summary_diff_variables_new_old import TemplateSummaryDiffVariablesNewOld
from influxdb_client.domain.template_summary_errors import TemplateSummaryErrors
from influxdb_client.domain.template_summary_label import TemplateSummaryLabel
from influxdb_client.domain.template_summary_label_properties import TemplateSummaryLabelProperties
from influxdb_client.domain.template_summary_summary import TemplateSummarySummary
from influxdb_client.domain.template_summary_summary_buckets import TemplateSummarySummaryBuckets
from influxdb_client.domain.template_summary_summary_dashboards import TemplateSummarySummaryDashboards
from influxdb_client.domain.template_summary_summary_label_mappings import TemplateSummarySummaryLabelMappings
from influxdb_client.domain.template_summary_summary_notification_rules import TemplateSummarySummaryNotificationRules
from influxdb_client.domain.template_summary_summary_status_rules import TemplateSummarySummaryStatusRules
from influxdb_client.domain.template_summary_summary_tag_rules import TemplateSummarySummaryTagRules
from influxdb_client.domain.template_summary_summary_tasks import TemplateSummarySummaryTasks
from influxdb_client.domain.template_summary_summary_variables import TemplateSummarySummaryVariables
from influxdb_client.domain.test_statement import TestStatement
from influxdb_client.domain.threshold import Threshold
from influxdb_client.domain.threshold_base import ThresholdBase
from influxdb_client.domain.threshold_check import ThresholdCheck
from influxdb_client.domain.unary_expression import UnaryExpression
from influxdb_client.domain.unsigned_integer_literal import UnsignedIntegerLiteral
from influxdb_client.domain.user import User
from influxdb_client.domain.user_response import UserResponse
from influxdb_client.domain.user_response_links import UserResponseLinks
from influxdb_client.domain.users import Users
from influxdb_client.domain.variable import Variable
from influxdb_client.domain.variable_assignment import VariableAssignment
from influxdb_client.domain.variable_links import VariableLinks
from influxdb_client.domain.variable_properties import VariableProperties
from influxdb_client.domain.variables import Variables
from influxdb_client.domain.view import View
from influxdb_client.domain.view_links import ViewLinks
from influxdb_client.domain.view_properties import ViewProperties
from influxdb_client.domain.views import Views
from influxdb_client.domain.write_precision import WritePrecision
from influxdb_client.domain.xy_geom import XYGeom
from influxdb_client.domain.xy_view_properties import XYViewProperties
from influxdb_client.client.authorizations_api import AuthorizationsApi
from influxdb_client.client.bucket_api import BucketsApi
from influxdb_client.client.delete_api import DeleteApi
from influxdb_client.client.invokable_scripts_api import InvokableScriptsApi
from influxdb_client.client.labels_api import LabelsApi
from influxdb_client.client.organizations_api import OrganizationsApi
from influxdb_client.client.query_api import QueryApi
from influxdb_client.client.tasks_api import TasksApi
from influxdb_client.client.users_api import UsersApi
from influxdb_client.client.write_api import WriteApi, WriteOptions
from influxdb_client.client.influxdb_client import InfluxDBClient
from influxdb_client.client.logging_handler import InfluxLoggingHandler
from influxdb_client.client.write.point import Point
from influxdb_client.version import VERSION
__version__ = VERSION
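For orientation, a minimal usage sketch of the package exported above; the URL, token, org, and bucket values are placeholders for a local InfluxDB 2.x instance:

from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS

# Placeholder connection settings for a local InfluxDB 2.x instance.
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
    # Write a single point synchronously.
    write_api = client.write_api(write_options=SYNCHRONOUS)
    point = Point("cpu").tag("host", "server01").field("usage", 12.5)
    write_api.write(bucket="my-bucket", record=point, write_precision=WritePrecision.NS)
    # Query it back with Flux.
    for table in client.query_api().query('from(bucket: "my-bucket") |> range(start: -1h)'):
        for record in table.records:
            print(record.get_time(), record.get_value())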

influxdb_client/_async/__init__.py

@@ -0,0 +1 @@
"""Asynchronous REST APIs."""

influxdb_client/_async/api_client.py

@@ -0,0 +1,663 @@
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import datetime
import json
import mimetypes
import os
import re
import tempfile
from multiprocessing.pool import ThreadPool
from urllib.parse import quote
import influxdb_client.domain
from influxdb_client import SigninService
from influxdb_client import SignoutService
from influxdb_client._async import rest
from influxdb_client.configuration import Configuration
from influxdb_client.rest import _requires_create_user_session, _requires_expire_user_session
class ApiClientAsync(object):
"""Generic API client for OpenAPI client library Build.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, str, int)
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
_pool = None
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=None, **kwargs):
"""Initialize generic API client."""
if configuration is None:
configuration = Configuration()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObjectAsync(configuration, **kwargs)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
from influxdb_client import VERSION
self.user_agent = f'influxdb-client-python/{VERSION}'
async def close(self):
"""Dispose api client."""
await self._signout()
await self.rest_client.close()
"""Dispose pools."""
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
@property
def pool(self):
"""Create thread pool on first request avoids instantiating unused threadpool for blocking clients."""
if self._pool is None:
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client."""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
"""Set User agent for this API client."""
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
"""Set HTTP header for this API client."""
self.default_headers[header_name] = header_value
async def __call_api(
self, resource_path, method, path_params=None,
query_params=None, header_params=None, body=None, post_params=None,
files=None, response_type=None, auth_settings=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None, urlopen_kw=None):
config = self.configuration
await self._signin(resource_path=resource_path)
# header parameters
header_params = header_params or {}
config.update_request_header_params(resource_path, header_params)
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# encode everything except the configured safe characters
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
body = config.update_request_body(resource_path, body)
# request url
url = self.configuration.host + resource_path
urlopen_kw = urlopen_kw or {}
# perform request and return response
response_data = await self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout, **urlopen_kw)
self.last_response = response_data
return_data = response_data
if _preload_content:
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only is not False:
return return_data
else:
return (return_data, response_data.status,
response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""Build a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert a model object to a dict, skipping the
# `openapi_types` and `attribute_map` metadata and any
# attribute whose value is None. Attribute names are
# mapped to their JSON keys via `attribute_map`.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in obj.openapi_types.items()
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in obj_dict.items()}
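# Illustrative conversions (hypothetical inputs):
#   sanitize_for_serialization(datetime.date(2024, 9, 13)) -> '2024-09-13'
#   sanitize_for_serialization([1, 'a', (2, 3)])           -> [1, 'a', (2, 3)]
#   sanitize_for_serialization(bucket_model)               -> {'orgID': ..., 'retentionRules': [...], ...}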
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in data.items()}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(influxdb_client.domain, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass)
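# Type strings follow the generator's grammar: primitives such as 'int' or
# 'datetime', containers such as 'list[Bucket]' or 'dict(str, TemplateSummary)',
# and bare model names that are resolved via getattr on influxdb_client.domain.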
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, async_req=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None, urlopen_kw=None):
"""Make the HTTP request (synchronous) and Return deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param urlopen_kw: Additional parameters are passed to
:meth:`urllib3.request.RequestMethods.request`
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout, urlopen_kw)
else:
thread = self.pool.apply_async(self.__call_api, (resource_path,
method, path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
_return_http_data_only,
collection_formats,
_preload_content, _request_timeout, urlopen_kw))
return thread
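# Note: in this async variant the service methods await the value returned
# here (see e.g. post_signin_async below); a sketch with a hypothetical call:
#   response = await api_client.call_api('/health', 'GET', response_type='HealthCheck')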
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None, **urlopen_kw):
"""Make the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers,
**urlopen_kw)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers,
**urlopen_kw)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
else:
raise ValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
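# Examples (hypothetical parameters):
#   parameters_to_tuples({'id': [1, 2, 3]}, {'id': 'csv'})   -> [('id', '1,2,3')]
#   parameters_to_tuples({'id': [1, 2, 3]}, {'id': 'multi'}) -> [('id', 1), ('id', 2), ('id', 3)]
#   parameters_to_tuples({'org': 'my-org'}, None)            -> [('org', 'my-org')]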
def prepare_post_parameters(self, post_params=None, files=None):
"""Build form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in files.items():
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Return `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
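# e.g. select_header_accept(['application/xml', 'application/json']) returns
# 'application/json'; with no JSON variant the accepts are joined, e.g.
# select_header_accept(['text/csv', 'application/xml']) -> 'text/csv, application/xml'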
def select_header_content_type(self, content_types):
"""Return `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""Update header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
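# For InfluxDB token authentication the configuration typically yields a
# setting like {'in': 'header', 'key': 'Authorization', 'value': 'Token my-token'},
# which this method turns into an `Authorization: Token my-token` header.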
def __deserialize_file(self, response):
"""Deserializes body to file.
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return str(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return an original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datatime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if not klass.openapi_types and not hasattr(klass,
'get_real_child_model'):
return data
kwargs = {}
if klass.openapi_types is not None:
for attr, attr_type in klass.openapi_types.items():
if (data is not None and
klass.attribute_map[attr] in data and
isinstance(data, (list, dict))):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
if hasattr(instance, 'get_real_child_model'):
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
return instance
async def _signin(self, resource_path: str):
if _requires_create_user_session(self.configuration, self.cookie, resource_path):
http_info = await SigninService(self).post_signin_async(_return_http_data_only=False)
self.cookie = http_info[2]['set-cookie']
async def _signout(self):
if _requires_expire_user_session(self.configuration, self.cookie):
await SignoutService(self).post_signout_async()
self.cookie = None
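The async client built on this ApiClientAsync is normally reached through InfluxDBClientAsync; a minimal sketch, assuming the aiohttp-based async extra is installed and using placeholder credentials:

import asyncio
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync

async def main():
    # Placeholder connection settings.
    async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
        ready = await client.ping()
        print(f"InfluxDB reachable: {ready}")

asyncio.run(main())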

influxdb_client/_async/rest.py

@@ -0,0 +1,309 @@
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import io
import json
import re
import ssl
from urllib.parse import urlencode
import aiohttp
from influxdb_client.rest import ApiException
from influxdb_client.rest import _BaseRESTClient
from influxdb_client.rest import _UTF_8_encoding
async def _on_request_start(session, trace_config_ctx, params):
_BaseRESTClient.log_request(params.method, params.url)
_BaseRESTClient.log_headers(params.headers, '>>>')
async def _on_request_chunk_sent(session, context, params):
if params.chunk:
_BaseRESTClient.log_body(params.chunk, '>>>')
async def _on_request_end(session, trace_config_ctx, params):
_BaseRESTClient.log_response(params.response.status)
_BaseRESTClient.log_headers(params.headers, '<<<')
response_content = params.response.content
data = bytearray()
while True:
chunk = await response_content.read(100)
if not chunk:
break
data += chunk
if data:
_BaseRESTClient.log_body(data.decode(_UTF_8_encoding), '<<<')
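# push the logged bytes back into the stream so the caller can still read the body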
response_content.unread_data(data=data)
class RESTResponseAsync(io.IOBase):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, resp, data):
"""Initialize with HTTP response."""
self.aiohttp_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = data
def getheaders(self):
"""Return a CIMultiDictProxy of the response headers."""
return self.aiohttp_response.headers
def getheader(self, name, default=None):
"""Return a given response header."""
return self.aiohttp_response.headers.get(name, default)
class RESTClientObjectAsync(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, configuration, pools_size=4, maxsize=None, **kwargs):
"""Initialize REST client."""
# maxsize is number of requests to host that are allowed in parallel
if maxsize is None:
maxsize = configuration.connection_pool_maxsize
if configuration.ssl_context is None:
ssl_context = ssl.create_default_context(cafile=configuration.ssl_ca_cert)
if configuration.cert_file:
ssl_context.load_cert_chain(
certfile=configuration.cert_file, keyfile=configuration.cert_key_file,
password=configuration.cert_key_password
)
if not configuration.verify_ssl:
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
else:
ssl_context = configuration.ssl_context
connector = aiohttp.TCPConnector(
limit=maxsize,
ssl=ssl_context
)
self.proxy = configuration.proxy
self.proxy_headers = configuration.proxy_headers
self.allow_redirects = kwargs.get('allow_redirects', True)
self.max_redirects = kwargs.get('max_redirects', 10)
# configure tracing
trace_config = aiohttp.TraceConfig()
trace_config.on_request_start.append(_on_request_start)
trace_config.on_request_chunk_sent.append(_on_request_chunk_sent)
trace_config.on_request_end.append(_on_request_end)
# timeout
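# configuration.timeout is expressed in milliseconds; aiohttp expects seconds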
if isinstance(configuration.timeout, (int, float,)): # noqa: E501,F821
timeout = aiohttp.ClientTimeout(total=configuration.timeout / 1_000)
elif isinstance(configuration.timeout, aiohttp.ClientTimeout):
timeout = configuration.timeout
else:
timeout = aiohttp.client.DEFAULT_TIMEOUT
# https pool manager
_client_session_type = kwargs.get('client_session_type', aiohttp.ClientSession)
_client_session_kwargs = kwargs.get('client_session_kwargs', {})
self.pool_manager = _client_session_type(
connector=connector,
timeout=timeout,
trace_configs=[trace_config] if configuration.debug else None,
**_client_session_kwargs
)
async def close(self):
"""Dispose connection pool manager."""
await self.pool_manager.close()
async def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Execute request.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: this is a non-applicable field for
the AiohttpClient.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
args = {
"method": method,
"url": url,
"headers": headers,
"allow_redirects": self.allow_redirects,
"max_redirects": self.max_redirects
}
if self.proxy:
args["proxy"] = self.proxy
if self.proxy_headers:
args["proxy_headers"] = self.proxy_headers
if query_params:
args["url"] += '?' + urlencode(query_params)
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if re.search('json', headers['Content-Type'], re.IGNORECASE):
if body is not None:
body = json.dumps(body)
args["data"] = body
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
args["data"] = aiohttp.FormData(post_params)
elif headers['Content-Type'] == 'multipart/form-data':
# must delete headers['Content-Type'] so that aiohttp can generate
# the correct multipart Content-Type (with boundary) itself
del headers['Content-Type']
data = aiohttp.FormData()
for param in post_params:
k, v = param
if isinstance(v, tuple) and len(v) == 3:
data.add_field(k,
value=v[1],
filename=v[0],
content_type=v[2])
else:
data.add_field(k, v)
args["data"] = data
# Pass a `bytes` parameter directly in the body to support content
# types other than JSON when the `body` argument is provided in
# serialized form
elif isinstance(body, bytes):
args["data"] = body
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
r = await self.pool_manager.request(**args)
if _preload_content:
data = await r.read()
r = RESTResponseAsync(r, data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
async def GET(self, url, headers=None, query_params=None,
_preload_content=True, _request_timeout=None):
"""Perform GET HTTP request."""
return (await self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params))
async def HEAD(self, url, headers=None, query_params=None,
_preload_content=True, _request_timeout=None):
"""Perform HEAD HTTP request."""
return (await self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params))
async def OPTIONS(self, url, headers=None, query_params=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Perform OPTIONS HTTP request."""
return (await self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body))
async def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
"""Perform DELETE HTTP request."""
return (await self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body))
async def POST(self, url, headers=None, query_params=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Perform POST HTTP request."""
return (await self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body))
async def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
"""Perform PUT HTTP request."""
return (await self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body))
async def PATCH(self, url, headers=None, query_params=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Perform PATCH HTTP request."""
return (await self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body))
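Direct use of this REST client is uncommon (ApiClientAsync wraps it), but a minimal sketch with a placeholder host looks like:

import asyncio
from influxdb_client.configuration import Configuration
from influxdb_client._async.rest import RESTClientObjectAsync

async def main():
    config = Configuration()
    config.host = "http://localhost:8086"  # placeholder host
    rest_client = RESTClientObjectAsync(config)
    try:
        # /ping needs no authentication and returns 204 when the server is up
        response = await rest_client.GET(config.host + "/ping")
        print(response.status)
    finally:
        await rest_client.close()

asyncio.run(main())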

influxdb_client/_sync/__init__.py

@@ -0,0 +1 @@
"""Synchronous REST APIs."""

influxdb_client/_sync/api_client.py

@@ -0,0 +1,663 @@
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import datetime
import json
import mimetypes
import os
import re
import tempfile
from multiprocessing.pool import ThreadPool
from urllib.parse import quote
import influxdb_client.domain
from influxdb_client import SigninService
from influxdb_client import SignoutService
from influxdb_client._sync import rest
from influxdb_client.configuration import Configuration
from influxdb_client.rest import _requires_create_user_session, _requires_expire_user_session
class ApiClient(object):
"""Generic API client for OpenAPI client library Build.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, str, int)
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
_pool = None
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=None, retries=False):
"""Initialize generic API client."""
if configuration is None:
configuration = Configuration()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration, retries=retries)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
from influxdb_client import VERSION
self.user_agent = f'influxdb-client-python/{VERSION}'
def __del__(self):
"""Dispose pools."""
self._signout()
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if self.rest_client and self.rest_client.pool_manager and hasattr(self.rest_client.pool_manager, 'clear'):
self.rest_client.pool_manager.clear()
@property
def pool(self):
"""Create thread pool on first request avoids instantiating unused threadpool for blocking clients."""
if self._pool is None:
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client."""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
"""Set User agent for this API client."""
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
"""Set HTTP header for this API client."""
self.default_headers[header_name] = header_value
def __call_api(
self, resource_path, method, path_params=None,
query_params=None, header_params=None, body=None, post_params=None,
files=None, response_type=None, auth_settings=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None, urlopen_kw=None):
config = self.configuration
self._signin(resource_path=resource_path)
# header parameters
header_params = header_params or {}
config.update_request_header_params(resource_path, header_params)
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
body = config.update_request_body(resource_path, body)
# request url
url = self.configuration.host + resource_path
urlopen_kw = urlopen_kw or {}
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout, **urlopen_kw)
self.last_response = response_data
return_data = response_data
if _preload_content:
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status,
response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""Build a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, sanitize each value in the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict, excluding the
# `openapi_types` and `attribute_map` attributes and
# any attribute whose value is None.
# Attribute names are converted to the JSON keys
# defined in the model for the request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in obj.openapi_types.items()
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in obj_dict.items()}
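# Example (illustrative values): nested structures are reduced recursively,
# e.g. ``{'at': datetime.date(2024, 1, 1), 'tags': ('a', 'b')}`` sanitizes to
# ``{'at': '2024-01-01', 'tags': ('a', 'b')}``.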
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in data.items()}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(influxdb_client.domain, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datetime(data)
else:
return self.__deserialize_model(data, klass)
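# Example: ``self.__deserialize(data, 'list[Bucket]')`` peels the container
# type and resolves ``Bucket`` through ``influxdb_client.domain``, while
# ``'dict(str, int)'`` deserializes each value of ``data`` with ``int``.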
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, async_req=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None, urlopen_kw=None):
"""Make the HTTP request (synchronous) and Return deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: return only the response data, without
the status code and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param urlopen_kw: Additional parameters are passed to
:meth:`urllib3.request.RequestMethods.request`
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout, urlopen_kw)
else:
thread = self.pool.apply_async(self.__call_api, (resource_path,
method, path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
_return_http_data_only,
collection_formats,
_preload_content, _request_timeout, urlopen_kw))
return thread
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None, **urlopen_kw):
"""Make the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers,
**urlopen_kw)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers,
**urlopen_kw)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
else:
raise ValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
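# Example: with ``collection_formats={'tag': 'csv'}`` the parameter
# ``{'tag': ['a', 'b']}`` becomes ``[('tag', 'a,b')]``; with ``'multi'`` it
# expands to ``[('tag', 'a'), ('tag', 'b')]``.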
def prepare_post_parameters(self, post_params=None, files=None):
"""Build form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in files.items():
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Return `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""Return `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""Update header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
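# Example (setting shape taken from the loop above): an auth setting such as
# ``{'in': 'header', 'key': 'Authorization', 'value': 'Token my-token'}``
# lands in ``headers``; with ``'in': 'query'`` it is appended to ``querys``.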
def __deserialize_file(self, response):
"""Deserializes body to file.
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return str(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return an original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datetime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if not klass.openapi_types and not hasattr(klass,
'get_real_child_model'):
return data
kwargs = {}
if klass.openapi_types is not None:
for attr, attr_type in klass.openapi_types.items():
if (data is not None and
klass.attribute_map[attr] in data and
isinstance(data, (list, dict))):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
if hasattr(instance, 'get_real_child_model'):
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
return instance
def _signin(self, resource_path: str):
if _requires_create_user_session(self.configuration, self.cookie, resource_path):
http_info = SigninService(self).post_signin_with_http_info()
self.cookie = http_info[2]['set-cookie']
def _signout(self):
if _requires_expire_user_session(self.configuration, self.cookie):
SignoutService(self).post_signout()
self.cookie = None
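# Construction sketch (host and token values are illustrative): the client
# wires a Configuration into the sync REST client and sends the extra header
# on every call:
#
#     conf = Configuration()
#     conf.host = "http://localhost:8086"
#     api_client = ApiClient(conf, header_name="Authorization",
#                            header_value="Token my-token")
#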

View File

@@ -0,0 +1,355 @@
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import io
import json
import re
import ssl
from urllib.parse import urlencode
from influxdb_client.rest import ApiException
from influxdb_client.rest import _BaseRESTClient
try:
import urllib3
except ImportError:
raise ImportError('OpenAPI Python client requires urllib3.')
class RESTResponse(io.IOBase):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, resp):
"""Initialize with HTTP response."""
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Return a dictionary of the response headers."""
return self.urllib3_response.headers
def getheader(self, name, default=None):
"""Return a given response header."""
return self.urllib3_response.headers.get(name, default)
class RESTClientObject(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, configuration, pools_size=4, maxsize=None, retries=False):
"""Initialize REST client."""
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
self.configuration = configuration
self.pools_size = pools_size
self.maxsize = maxsize
self.retries = retries
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
ca_certs = None
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
addition_pool_args['retries'] = self.retries
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.cert_key_file,
key_password=configuration.cert_key_password,
proxy_url=configuration.proxy,
proxy_headers=configuration.proxy_headers,
ssl_context=configuration.ssl_context,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.cert_key_file,
key_password=configuration.cert_key_password,
ssl_context=configuration.ssl_context,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None, **urlopen_kw):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param urlopen_kw: Additional parameters are passed to
:meth:`urllib3.request.RequestMethods.request`
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
_configured_timeout = _request_timeout or self.configuration.timeout
if _configured_timeout:
if isinstance(_configured_timeout, (int, float, )): # noqa: E501,F821
timeout = urllib3.Timeout(total=_configured_timeout / 1_000)
elif (isinstance(_configured_timeout, tuple) and
len(_configured_timeout) == 2):
timeout = urllib3.Timeout(
connect=_configured_timeout[0] / 1_000, read=_configured_timeout[1] / 1_000)
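# Note: timeouts are given in milliseconds and converted to seconds for
# urllib3, e.g. ``_request_timeout=5_000`` yields ``urllib3.Timeout(total=5.0)``
# and ``(2_000, 10_000)`` yields connect=2.0 s / read=10.0 s.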
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
if self.configuration.debug:
_BaseRESTClient.log_request(method, f"{url}{'' if query_params is None else '?' + urlencode(query_params)}")
_BaseRESTClient.log_headers(headers, '>>>')
_BaseRESTClient.log_body(body, '>>>')
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers,
**urlopen_kw)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers,
**urlopen_kw)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct
# Content-Type generated by urllib3 will be
# overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers,
**urlopen_kw)
# Pass a `string` parameter directly in the body to support
# content types other than JSON when the `body` argument is
# provided in serialized form
elif isinstance(body, str) or isinstance(body, bytes):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers,
**urlopen_kw)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers,
**urlopen_kw)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# In Python 3, response.data is bytes;
# decode it to str.
r.data = r.data.decode('utf8')
if self.configuration.debug:
_BaseRESTClient.log_response(r.status)
if hasattr(r, 'headers'):
_BaseRESTClient.log_headers(r.headers, '<<<')
if hasattr(r, 'urllib3_response'):
_BaseRESTClient.log_headers(r.urllib3_response.headers, '<<<')
_BaseRESTClient.log_body(r.data, '<<<')
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None, **urlopen_kw):
"""Perform GET HTTP request."""
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params,
**urlopen_kw)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None, **urlopen_kw):
"""Perform HEAD HTTP request."""
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params,
**urlopen_kw)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None, **urlopen_kw):
"""Perform OPTIONS HTTP request."""
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None, **urlopen_kw):
"""Perform DELETE HTTP request."""
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None, **urlopen_kw):
"""Perform POST HTTP request."""
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None, **urlopen_kw):
"""Perform PUT HTTP request."""
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None, **urlopen_kw):
"""Perform PATCH HTTP request."""
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
**urlopen_kw)
def __getstate__(self):
"""Return a dict of attributes that you want to pickle."""
state = self.__dict__.copy()
# Remove the pool manager.
del state['pool_manager']
return state
def __setstate__(self, state):
"""Set your object with the provided dict."""
self.__dict__.update(state)
# Init Pool manager
self.__init__(self.configuration, self.pools_size, self.maxsize, self.retries)
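# Pickling sketch (``rest_client`` assumed): ``__getstate__`` drops the
# unpicklable pool manager and ``__setstate__`` re-runs ``__init__``, so a
# round trip rebuilds it:
#
#     import pickle
#     restored = pickle.loads(pickle.dumps(rest_client))
#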

View File

@@ -0,0 +1,56 @@
# flake8: noqa
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import apis into api package
from influxdb_client.service.authorizations_service import AuthorizationsService
from influxdb_client.service.backup_service import BackupService
from influxdb_client.service.bucket_schemas_service import BucketSchemasService
from influxdb_client.service.buckets_service import BucketsService
from influxdb_client.service.cells_service import CellsService
from influxdb_client.service.checks_service import ChecksService
from influxdb_client.service.config_service import ConfigService
from influxdb_client.service.dbr_ps_service import DBRPsService
from influxdb_client.service.dashboards_service import DashboardsService
from influxdb_client.service.delete_service import DeleteService
from influxdb_client.service.health_service import HealthService
from influxdb_client.service.invokable_scripts_service import InvokableScriptsService
from influxdb_client.service.labels_service import LabelsService
from influxdb_client.service.legacy_authorizations_service import LegacyAuthorizationsService
from influxdb_client.service.metrics_service import MetricsService
from influxdb_client.service.notification_endpoints_service import NotificationEndpointsService
from influxdb_client.service.notification_rules_service import NotificationRulesService
from influxdb_client.service.organizations_service import OrganizationsService
from influxdb_client.service.ping_service import PingService
from influxdb_client.service.query_service import QueryService
from influxdb_client.service.ready_service import ReadyService
from influxdb_client.service.remote_connections_service import RemoteConnectionsService
from influxdb_client.service.replications_service import ReplicationsService
from influxdb_client.service.resources_service import ResourcesService
from influxdb_client.service.restore_service import RestoreService
from influxdb_client.service.routes_service import RoutesService
from influxdb_client.service.rules_service import RulesService
from influxdb_client.service.scraper_targets_service import ScraperTargetsService
from influxdb_client.service.secrets_service import SecretsService
from influxdb_client.service.setup_service import SetupService
from influxdb_client.service.signin_service import SigninService
from influxdb_client.service.signout_service import SignoutService
from influxdb_client.service.sources_service import SourcesService
from influxdb_client.service.tasks_service import TasksService
from influxdb_client.service.telegraf_plugins_service import TelegrafPluginsService
from influxdb_client.service.telegrafs_service import TelegrafsService
from influxdb_client.service.templates_service import TemplatesService
from influxdb_client.service.users_service import UsersService
from influxdb_client.service.variables_service import VariablesService
from influxdb_client.service.views_service import ViewsService
from influxdb_client.service.write_service import WriteService

View File

@@ -0,0 +1,554 @@
"""Commons function for Sync and Async client."""
from __future__ import absolute_import
import base64
import configparser
import logging
import os
from datetime import datetime, timedelta
from typing import List, Generator, Any, Union, Iterable, AsyncGenerator
from urllib3 import HTTPResponse
from influxdb_client import Configuration, Dialect, Query, OptionStatement, VariableAssignment, Identifier, \
Expression, BooleanLiteral, IntegerLiteral, FloatLiteral, DateTimeLiteral, UnaryExpression, DurationLiteral, \
Duration, StringLiteral, ArrayExpression, ImportDeclaration, MemberExpression, MemberAssignment, File, \
WriteService, QueryService, DeleteService, DeletePredicateRequest
from influxdb_client.client.flux_csv_parser import FluxResponseMetadataMode, FluxCsvParser, FluxSerializationMode
from influxdb_client.client.flux_table import FluxRecord, TableList, CSVIterator
from influxdb_client.client.util.date_utils import get_date_helper
from influxdb_client.client.util.helpers import get_org_query_param
from influxdb_client.client.warnings import MissingPivotFunction
from influxdb_client.client.write.dataframe_serializer import DataframeSerializer
from influxdb_client.rest import _UTF_8_encoding
try:
import dataclasses
_HAS_DATACLASS = True
except ModuleNotFoundError:
_HAS_DATACLASS = False
LOGGERS_NAMES = [
'influxdb_client.client.influxdb_client',
'influxdb_client.client.influxdb_client_async',
'influxdb_client.client.write_api',
'influxdb_client.client.write_api_async',
'influxdb_client.client.write.retry',
'influxdb_client.client.write.dataframe_serializer',
'influxdb_client.client.util.multiprocessing_helper',
'influxdb_client.client.http',
'influxdb_client.client.exceptions',
]
# noinspection PyMethodMayBeStatic
class _BaseClient(object):
def __init__(self, url, token, debug=None, timeout=10_000, enable_gzip=False, org: str = None,
default_tags: dict = None, http_client_logger: str = None, **kwargs) -> None:
self.url = url
self.token = token
self.org = org
self.default_tags = default_tags
self.conf = _Configuration()
if self.url.endswith("/"):
self.conf.host = self.url[:-1]
else:
self.conf.host = self.url
self.conf.enable_gzip = enable_gzip
self.conf.verify_ssl = kwargs.get('verify_ssl', True)
self.conf.ssl_ca_cert = kwargs.get('ssl_ca_cert', None)
self.conf.cert_file = kwargs.get('cert_file', None)
self.conf.cert_key_file = kwargs.get('cert_key_file', None)
self.conf.cert_key_password = kwargs.get('cert_key_password', None)
self.conf.ssl_context = kwargs.get('ssl_context', None)
self.conf.proxy = kwargs.get('proxy', None)
self.conf.proxy_headers = kwargs.get('proxy_headers', None)
self.conf.connection_pool_maxsize = kwargs.get('connection_pool_maxsize', self.conf.connection_pool_maxsize)
self.conf.timeout = timeout
# logging
self.conf.loggers["http_client_logger"] = logging.getLogger(http_client_logger)
for client_logger in LOGGERS_NAMES:
self.conf.loggers[client_logger] = logging.getLogger(client_logger)
self.conf.debug = debug
self.conf.username = kwargs.get('username', None)
self.conf.password = kwargs.get('password', None)
# defaults
self.auth_header_name = None
self.auth_header_value = None
# by token
if self.token:
self.auth_header_name = "Authorization"
self.auth_header_value = "Token " + self.token
# by HTTP basic
auth_basic = kwargs.get('auth_basic', False)
if auth_basic:
self.auth_header_name = "Authorization"
self.auth_header_value = "Basic " + base64.b64encode(token.encode()).decode()
# by username, password
if self.conf.username and self.conf.password:
self.auth_header_name = None
self.auth_header_value = None
self.retries = kwargs.get('retries', False)
self.profilers = kwargs.get('profilers', None)
@classmethod
def _from_config_file(cls, config_file: str = "config.ini", debug=None, enable_gzip=False, **kwargs):
config = configparser.ConfigParser()
config_name = kwargs.get('config_name', 'influx2')
is_json = False
try:
config.read(config_file)
except configparser.ParsingError:
with open(config_file) as json_file:
import json
config = json.load(json_file)
is_json = True
def _config_value(key: str):
value = str(config[key]) if is_json else config[config_name][key]
return value.strip('"')
def _has_option(key: str):
return key in config if is_json else config.has_option(config_name, key)
def _has_section(key: str):
return key in config if is_json else config.has_section(key)
url = _config_value('url')
token = _config_value('token')
timeout = None
if _has_option('timeout'):
timeout = _config_value('timeout')
org = None
if _has_option('org'):
org = _config_value('org')
verify_ssl = True
if _has_option('verify_ssl'):
verify_ssl = _config_value('verify_ssl')
ssl_ca_cert = None
if _has_option('ssl_ca_cert'):
ssl_ca_cert = _config_value('ssl_ca_cert')
cert_file = None
if _has_option('cert_file'):
cert_file = _config_value('cert_file')
cert_key_file = None
if _has_option('cert_key_file'):
cert_key_file = _config_value('cert_key_file')
cert_key_password = None
if _has_option('cert_key_password'):
cert_key_password = _config_value('cert_key_password')
connection_pool_maxsize = None
if _has_option('connection_pool_maxsize'):
connection_pool_maxsize = _config_value('connection_pool_maxsize')
auth_basic = False
if _has_option('auth_basic'):
auth_basic = _config_value('auth_basic')
default_tags = None
if _has_section('tags'):
if is_json:
default_tags = config['tags']
else:
tags = {k: v.strip('"') for k, v in config.items('tags')}
default_tags = dict(tags)
profilers = None
if _has_option('profilers'):
profilers = [x.strip() for x in _config_value('profilers').split(',')]
proxy = None
if _has_option('proxy'):
proxy = _config_value('proxy')
return cls(url, token, debug=debug, timeout=_to_int(timeout), org=org, default_tags=default_tags,
enable_gzip=enable_gzip, verify_ssl=_to_bool(verify_ssl), ssl_ca_cert=ssl_ca_cert,
cert_file=cert_file, cert_key_file=cert_key_file, cert_key_password=cert_key_password,
connection_pool_maxsize=_to_int(connection_pool_maxsize), auth_basic=_to_bool(auth_basic),
profilers=profilers, proxy=proxy, **kwargs)
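# Example config.ini accepted above (values illustrative); the default
# section name is ``influx2`` and ``[tags]`` becomes ``default_tags``:
#
#     [influx2]
#     url=http://localhost:8086
#     token=my-token
#     org=my-org
#     timeout=10000
#
#     [tags]
#     environment=ci
#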
@classmethod
def _from_env_properties(cls, debug=None, enable_gzip=False, **kwargs):
url = os.getenv('INFLUXDB_V2_URL', "http://localhost:8086")
token = os.getenv('INFLUXDB_V2_TOKEN', "my-token")
timeout = os.getenv('INFLUXDB_V2_TIMEOUT', "10000")
org = os.getenv('INFLUXDB_V2_ORG', "my-org")
verify_ssl = os.getenv('INFLUXDB_V2_VERIFY_SSL', "True")
ssl_ca_cert = os.getenv('INFLUXDB_V2_SSL_CA_CERT', None)
cert_file = os.getenv('INFLUXDB_V2_CERT_FILE', None)
cert_key_file = os.getenv('INFLUXDB_V2_CERT_KEY_FILE', None)
cert_key_password = os.getenv('INFLUXDB_V2_CERT_KEY_PASSWORD', None)
connection_pool_maxsize = os.getenv('INFLUXDB_V2_CONNECTION_POOL_MAXSIZE', None)
auth_basic = os.getenv('INFLUXDB_V2_AUTH_BASIC', "False")
prof = os.getenv("INFLUXDB_V2_PROFILERS", None)
profilers = None
if prof is not None:
profilers = [x.strip() for x in prof.split(',')]
default_tags = dict()
for key, value in os.environ.items():
if key.startswith("INFLUXDB_V2_TAG_"):
default_tags[key[16:].lower()] = value
return cls(url, token, debug=debug, timeout=_to_int(timeout), org=org, default_tags=default_tags,
enable_gzip=enable_gzip, verify_ssl=_to_bool(verify_ssl), ssl_ca_cert=ssl_ca_cert,
cert_file=cert_file, cert_key_file=cert_key_file, cert_key_password=cert_key_password,
connection_pool_maxsize=_to_int(connection_pool_maxsize), auth_basic=_to_bool(auth_basic),
profilers=profilers, **kwargs)
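# Example environment (values illustrative): INFLUXDB_V2_URL, INFLUXDB_V2_TOKEN
# and INFLUXDB_V2_ORG map directly, while INFLUXDB_V2_TAG_ENV=ci yields
# ``default_tags={'env': 'ci'}`` via the prefix-stripping loop above.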
# noinspection PyMethodMayBeStatic
class _BaseQueryApi(object):
"""Base implementation for Queryable API."""
default_dialect = Dialect(header=True, delimiter=",", comment_prefix="#",
annotations=["datatype", "group", "default"], date_time_format="RFC3339")
def __init__(self, influxdb_client, query_options=None):
from influxdb_client.client.query_api import QueryOptions
self._query_options = QueryOptions() if query_options is None else query_options
self._influxdb_client = influxdb_client
self._query_api = QueryService(influxdb_client.api_client)
"""Base implementation for Queryable API."""
def _to_tables(self, response, query_options=None, response_metadata_mode:
FluxResponseMetadataMode = FluxResponseMetadataMode.full) -> TableList:
"""
Parse HTTP response to TableList.
:param response: HTTP response from an HTTP client. Expected type: `urllib3.response.HTTPResponse`.
"""
_parser = self._to_tables_parser(response, query_options, response_metadata_mode)
list(_parser.generator())
return _parser.table_list()
async def _to_tables_async(self, response, query_options=None, response_metadata_mode:
FluxResponseMetadataMode = FluxResponseMetadataMode.full) -> TableList:
"""
Parse HTTP response to TableList.
:param response: HTTP response from an HTTP client. Expected type: `aiohttp.client_reqrep.ClientResponse`.
"""
async with self._to_tables_parser(response, query_options, response_metadata_mode) as parser:
async for _ in parser.generator_async():
pass
return parser.table_list()
def _to_csv(self, response: HTTPResponse) -> CSVIterator:
"""Parse HTTP response to CSV."""
return CSVIterator(response)
def _to_flux_record_stream(self, response, query_options=None,
response_metadata_mode: FluxResponseMetadataMode = FluxResponseMetadataMode.full) -> \
Generator[FluxRecord, Any, None]:
"""
Parse HTTP response to FluxRecord stream.
:param response: HTTP response from an HTTP client. Expected type: `urllib3.response.HTTPResponse`.
"""
_parser = self._to_flux_record_stream_parser(query_options, response, response_metadata_mode)
return _parser.generator()
async def _to_flux_record_stream_async(self, response, query_options=None, response_metadata_mode:
FluxResponseMetadataMode = FluxResponseMetadataMode.full) -> \
AsyncGenerator['FluxRecord', None]:
"""
Parse HTTP response to FluxRecord stream.
:param response: HTTP response from an HTTP client. Expected type: `aiohttp.client_reqrep.ClientResponse`.
"""
_parser = self._to_flux_record_stream_parser(query_options, response, response_metadata_mode)
return (await _parser.__aenter__()).generator_async()
def _to_data_frame_stream(self, data_frame_index, response, query_options=None,
response_metadata_mode: FluxResponseMetadataMode = FluxResponseMetadataMode.full,
use_extension_dtypes=False):
"""
Parse HTTP response to DataFrame stream.
:param response: HTTP response from an HTTP client. Expected type: `urllib3.response.HTTPResponse`.
"""
_parser = self._to_data_frame_stream_parser(data_frame_index, query_options, response, response_metadata_mode,
use_extension_dtypes)
return _parser.generator()
async def _to_data_frame_stream_async(self, data_frame_index, response, query_options=None, response_metadata_mode:
FluxResponseMetadataMode = FluxResponseMetadataMode.full,
use_extension_dtypes=False):
"""
Parse HTTP response to DataFrame stream.
:param response: HTTP response from an HTTP client. Expected type: `aiohttp.client_reqrep.ClientResponse`.
"""
_parser = self._to_data_frame_stream_parser(data_frame_index, query_options, response, response_metadata_mode,
use_extension_dtypes)
return (await _parser.__aenter__()).generator_async()
def _to_tables_parser(self, response, query_options, response_metadata_mode):
return FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.tables,
query_options=query_options, response_metadata_mode=response_metadata_mode)
def _to_flux_record_stream_parser(self, query_options, response, response_metadata_mode):
return FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.stream,
query_options=query_options, response_metadata_mode=response_metadata_mode)
def _to_data_frame_stream_parser(self, data_frame_index, query_options, response, response_metadata_mode,
use_extension_dtypes):
return FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.dataFrame,
data_frame_index=data_frame_index, query_options=query_options,
response_metadata_mode=response_metadata_mode,
use_extension_dtypes=use_extension_dtypes)
def _to_data_frames(self, _generator):
"""Parse stream of DataFrames into expected type."""
from ..extras import pd
if isinstance(_generator, list):
_dataFrames = _generator
else:
_dataFrames = list(_generator)
if len(_dataFrames) == 0:
return pd.DataFrame(columns=[], index=None)
elif len(_dataFrames) == 1:
return _dataFrames[0]
else:
return _dataFrames
def _org_param(self, org):
return get_org_query_param(org=org, client=self._influxdb_client)
def _get_query_options(self):
if self._query_options and self._query_options.profilers:
return self._query_options
elif self._influxdb_client.profilers:
from influxdb_client.client.query_api import QueryOptions
return QueryOptions(profilers=self._influxdb_client.profilers)
def _create_query(self, query, dialect=default_dialect, params: dict = None, **kwargs):
query_options = self._get_query_options()
profilers = query_options.profilers if query_options is not None else None
q = Query(query=query, dialect=dialect, extern=_BaseQueryApi._build_flux_ast(params, profilers))
if profilers:
print("\n===============")
print("Profiler: query")
print("===============")
print(query)
if kwargs.get('dataframe_query', False):
MissingPivotFunction.print_warning(query)
return q
@staticmethod
def _params_to_extern_ast(params: dict) -> List['OptionStatement']:
statements = []
for key, value in params.items():
expression = _BaseQueryApi._parm_to_extern_ast(value)
if expression is None:
continue
statements.append(OptionStatement("OptionStatement",
VariableAssignment("VariableAssignment", Identifier("Identifier", key),
expression)))
return statements
@staticmethod
def _parm_to_extern_ast(value) -> Union[Expression, None]:
if value is None:
return None
if isinstance(value, bool):
return BooleanLiteral("BooleanLiteral", value)
elif isinstance(value, int):
return IntegerLiteral("IntegerLiteral", str(value))
elif isinstance(value, float):
return FloatLiteral("FloatLiteral", value)
elif isinstance(value, datetime):
value = get_date_helper().to_utc(value)
nanoseconds = getattr(value, 'nanosecond', 0)
fraction = f'{(value.microsecond * 1000 + nanoseconds):09d}'
return DateTimeLiteral("DateTimeLiteral", value.strftime('%Y-%m-%dT%H:%M:%S.') + fraction + 'Z')
elif isinstance(value, timedelta):
_micro_delta = int(value / timedelta(microseconds=1))
if _micro_delta < 0:
return UnaryExpression("UnaryExpression", argument=DurationLiteral("DurationLiteral", [
Duration(magnitude=-_micro_delta, unit="us")]), operator="-")
else:
return DurationLiteral("DurationLiteral", [Duration(magnitude=_micro_delta, unit="us")])
elif isinstance(value, str):
return StringLiteral("StringLiteral", str(value))
elif isinstance(value, Iterable):
return ArrayExpression("ArrayExpression",
elements=list(map(lambda it: _BaseQueryApi._parm_to_extern_ast(it), value)))
else:
return value
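# Example: ``timedelta(hours=-1)`` serializes to a negated DurationLiteral of
# ``3600000000us``, and a ``datetime`` becomes an RFC3339 DateTimeLiteral with
# a nine-digit fractional second.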
@staticmethod
def _build_flux_ast(params: dict = None, profilers: List[str] = None):
imports = []
body = []
if profilers is not None and len(profilers) > 0:
imports.append(ImportDeclaration(
"ImportDeclaration",
path=StringLiteral("StringLiteral", "profiler")))
elements = []
for profiler in profilers:
elements.append(StringLiteral("StringLiteral", value=profiler))
member = MemberExpression(
"MemberExpression",
object=Identifier("Identifier", "profiler"),
_property=Identifier("Identifier", "enabledProfilers"))
prof = OptionStatement(
"OptionStatement",
assignment=MemberAssignment(
"MemberAssignment",
member=member,
init=ArrayExpression(
"ArrayExpression",
elements=elements)))
body.append(prof)
if params is not None:
body.extend(_BaseQueryApi._params_to_extern_ast(params))
return File(package=None, name=None, type=None, imports=imports, body=body)
class _BaseWriteApi(object):
def __init__(self, influxdb_client, point_settings=None):
self._influxdb_client = influxdb_client
self._point_settings = point_settings
self._write_service = WriteService(influxdb_client.api_client)
if influxdb_client.default_tags:
for key, value in influxdb_client.default_tags.items():
self._point_settings.add_default_tag(key, value)
def _append_default_tag(self, key, val, record):
from influxdb_client import Point
if isinstance(record, bytes) or isinstance(record, str):
pass
elif isinstance(record, Point):
record.tag(key, val)
elif isinstance(record, dict):
record.setdefault("tags", {})
record.get("tags")[key] = val
elif isinstance(record, Iterable):
for item in record:
self._append_default_tag(key, val, item)
def _append_default_tags(self, record):
if self._point_settings.defaultTags and record is not None:
for key, val in self._point_settings.defaultTags.items():
self._append_default_tag(key, val, record)
def _serialize(self, record, write_precision, payload, **kwargs):
from influxdb_client import Point
if isinstance(record, bytes):
payload[write_precision].append(record)
elif isinstance(record, str):
self._serialize(record.encode(_UTF_8_encoding), write_precision, payload, **kwargs)
elif isinstance(record, Point):
precision_from_point = kwargs.get('precision_from_point', True)
precision = record.write_precision if precision_from_point else write_precision
self._serialize(record.to_line_protocol(precision=precision), precision, payload, **kwargs)
elif isinstance(record, dict):
self._serialize(Point.from_dict(record, write_precision=write_precision, **kwargs),
write_precision, payload, **kwargs)
elif 'DataFrame' in type(record).__name__:
serializer = DataframeSerializer(record, self._point_settings, write_precision, **kwargs)
self._serialize(serializer.serialize(), write_precision, payload, **kwargs)
elif hasattr(record, "_asdict"):
# noinspection PyProtectedMember
self._serialize(record._asdict(), write_precision, payload, **kwargs)
elif _HAS_DATACLASS and dataclasses.is_dataclass(record):
self._serialize(dataclasses.asdict(record), write_precision, payload, **kwargs)
elif isinstance(record, Iterable):
for item in record:
self._serialize(item, write_precision, payload, **kwargs)
# noinspection PyMethodMayBeStatic
class _BaseDeleteApi(object):
def __init__(self, influxdb_client):
self._influxdb_client = influxdb_client
self._service = DeleteService(influxdb_client.api_client)
def _prepare_predicate_request(self, start, stop, predicate):
date_helper = get_date_helper()
if isinstance(start, datetime):
start = date_helper.to_utc(start)
if isinstance(stop, datetime):
stop = date_helper.to_utc(stop)
predicate_request = DeletePredicateRequest(start=start, stop=stop, predicate=predicate)
return predicate_request
class _Configuration(Configuration):
def __init__(self):
Configuration.__init__(self)
self.enable_gzip = False
self.username = None
self.password = None
def update_request_header_params(self, path: str, params: dict):
super().update_request_header_params(path, params)
if self.enable_gzip:
# GZIP Request
if path == '/api/v2/write':
params["Content-Encoding"] = "gzip"
params["Accept-Encoding"] = "identity"
# GZIP Response
if path == '/api/v2/query':
# params["Content-Encoding"] = "gzip"
params["Accept-Encoding"] = "gzip"
def update_request_body(self, path: str, body):
_body = super().update_request_body(path, body)
if self.enable_gzip:
# GZIP Request
if path == '/api/v2/write':
import gzip
if isinstance(_body, bytes):
return gzip.compress(data=_body)
else:
return gzip.compress(bytes(_body, _UTF_8_encoding))
return _body
def _to_bool(bool_value):
return str(bool_value).lower() in ("yes", "true")
def _to_int(int_value):
return int(int_value) if int_value is not None else None

View File

@@ -0,0 +1,66 @@
class _Page:
def __init__(self, values, has_next, next_after):
self.has_next = has_next
self.values = values
self.next_after = next_after
@staticmethod
def empty():
return _Page([], False, None)
@staticmethod
def initial(after):
return _Page([], True, after)
class _PageIterator:
def __init__(self, page: _Page, get_next_page):
self.page = page
self.get_next_page = get_next_page
def __iter__(self):
return self
def __next__(self):
if not self.page.values:
if self.page.has_next:
self.page = self.get_next_page(self.page)
if not self.page.values:
raise StopIteration
return self.page.values.pop(0)
class _Paginated:
def __init__(self, paginated_getter, pluck_page_resources_from_response):
self.paginated_getter = paginated_getter
self.pluck_page_resources_from_response = pluck_page_resources_from_response
def find_iter(self, **kwargs):
"""Iterate over resources with pagination.
:key str org: The organization name.
:key str org_id: The organization ID.
:key str after: The last resource ID from which to seek (exclusive).
:key int limit: the maximum number of items per page
:return: resources iterator
"""
def get_next_page(page: _Page):
return self._find_next_page(page, **kwargs)
return iter(_PageIterator(_Page.initial(kwargs.get('after')), get_next_page))
def _find_next_page(self, page: _Page, **kwargs):
if not page.has_next:
return _Page.empty()
kw_args = {**kwargs, 'after': page.next_after} if page.next_after is not None else kwargs
response = self.paginated_getter(**kw_args)
resources = self.pluck_page_resources_from_response(response)
has_next = response.links.next is not None
last_id = resources[-1].id if resources else None
return _Page(resources, has_next, last_id)
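# Usage sketch (service and getter names assumed): wrap a paginated getter
# and iterate resources lazily, fetching one page at a time:
#
#     pages = _Paginated(buckets_service.get_buckets, lambda r: r.buckets)
#     for bucket in pages.find_iter(limit=20):
#         print(bucket.name)
#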

View File

@@ -0,0 +1,134 @@
"""Authorization is about managing the security of your InfluxDB instance."""
from influxdb_client import Authorization, AuthorizationsService, User, Organization
class AuthorizationsApi(object):
"""Implementation for '/api/v2/authorizations' endpoint."""
def __init__(self, influxdb_client):
"""Initialize defaults."""
self._influxdb_client = influxdb_client
self._authorizations_service = AuthorizationsService(influxdb_client.api_client)
def create_authorization(self, org_id=None, permissions: list = None,
authorization: Authorization = None) -> Authorization:
"""
Create an authorization.
:param org_id: organization ID
:param permissions: list of permissions
:type permissions: list of Permission
:param authorization: authorization object
"""
if authorization is not None:
return self._authorizations_service.post_authorizations(authorization_post_request=authorization)
# if org_id is not None and permissions is not None:
authorization = Authorization(org_id=org_id, permissions=permissions)
return self._authorizations_service.post_authorizations(authorization_post_request=authorization)
def find_authorization_by_id(self, auth_id: str) -> Authorization:
"""
Find authorization by id.
:param auth_id: authorization id
:return: Authorization
"""
return self._authorizations_service.get_authorizations_id(auth_id=auth_id)
def find_authorizations(self, **kwargs):
"""
Get a list of all authorizations.
:key str user_id: filter authorizations belonging to a user ID
:key str user: filter authorizations belonging to a user name
:key str org_id: filter authorizations belonging to an organization ID
:key str org: filter authorizations belonging to an organization name
:return: Authorizations
"""
authorizations = self._authorizations_service.get_authorizations(**kwargs)
return authorizations.authorizations
def find_authorizations_by_user(self, user: User):
"""
Find authorization by User.
:return: Authorization list
"""
return self.find_authorizations(user_id=user.id)
def find_authorizations_by_user_id(self, user_id: str):
"""
Find authorization by user id.
:return: Authorization list
"""
return self.find_authorizations(user_id=user_id)
def find_authorizations_by_user_name(self, user_name: str):
"""
Find authorization by user name.
:return: Authorization list
"""
return self.find_authorizations(user=user_name)
def find_authorizations_by_org(self, org: Organization):
"""
Find authorization by user name.
:return: Authorization list
"""
if isinstance(org, Organization):
return self.find_authorizations(org_id=org.id)
def find_authorizations_by_org_name(self, org_name: str):
"""
Find authorization by org name.
:return: Authorization list
"""
return self.find_authorizations(org=org_name)
def find_authorizations_by_org_id(self, org_id: str):
"""
Find authorization by org id.
:return: Authorization list
"""
return self.find_authorizations(org_id=org_id)
def update_authorization(self, auth):
"""
Update authorization object.
:param auth: authorization object with updated fields; its ``id`` selects the authorization to patch
:return: updated Authorization
"""
return self._authorizations_service.patch_authorizations_id(auth_id=auth.id, authorization_update_request=auth)
def clone_authorization(self, auth) -> Authorization:
"""Clone an authorization."""
if isinstance(auth, Authorization):
cloned = Authorization(org_id=auth.org_id, permissions=auth.permissions)
# cloned.description = auth.description
# cloned.status = auth.status
return self.create_authorization(authorization=cloned)
if isinstance(auth, str):
authorization = self.find_authorization_by_id(auth)
return self.clone_authorization(auth=authorization)
raise ValueError("Invalid argument")
def delete_authorization(self, auth):
"""Delete a authorization."""
if isinstance(auth, Authorization):
return self._authorizations_service.delete_authorizations_id(auth_id=auth.id)
if isinstance(auth, str):
return self._authorizations_service.delete_authorizations_id(auth_id=auth)
raise ValueError("Invalid argument")

View File

@@ -0,0 +1,132 @@
"""
A bucket is a named location where time series data is stored.
All buckets have a retention policy, a duration of time that each data point persists.
A bucket belongs to an organization.
"""
import warnings
from influxdb_client import BucketsService, Bucket, PostBucketRequest, PatchBucketRequest
from influxdb_client.client.util.helpers import get_org_query_param
from influxdb_client.client._pages import _Paginated
class BucketsApi(object):
"""Implementation for '/api/v2/buckets' endpoint."""
def __init__(self, influxdb_client):
"""Initialize defaults."""
self._influxdb_client = influxdb_client
self._buckets_service = BucketsService(influxdb_client.api_client)
def create_bucket(self, bucket=None, bucket_name=None, org_id=None, retention_rules=None,
description=None, org=None) -> Bucket:
"""Create a bucket.
:param Bucket|PostBucketRequest bucket: bucket to create
:param bucket_name: bucket name
:param description: bucket description
:param org_id: organization ID (deprecated; use ``org``)
:param retention_rules: retention rules array or a single BucketRetentionRules
:param str, Organization org: specifies the organization for the created bucket;
accepts the ``ID``, ``Name`` or ``Organization`` object.
If not specified, the default value from ``InfluxDBClient.org`` is used.
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
if retention_rules is None:
retention_rules = []
rules = []
if isinstance(retention_rules, list):
rules.extend(retention_rules)
else:
rules.append(retention_rules)
if org_id is not None:
warnings.warn("org_id is deprecated; use org", DeprecationWarning)
if bucket is None:
bucket = PostBucketRequest(name=bucket_name,
retention_rules=rules,
description=description,
org_id=get_org_query_param(org=(org_id if org is None else org),
client=self._influxdb_client,
required_id=True))
return self._buckets_service.post_buckets(post_bucket_request=bucket)
def update_bucket(self, bucket: Bucket) -> Bucket:
"""Update a bucket.
:param bucket: Bucket update to apply (required)
:return: Bucket
"""
request = PatchBucketRequest(name=bucket.name,
description=bucket.description,
retention_rules=bucket.retention_rules)
return self._buckets_service.patch_buckets_id(bucket_id=bucket.id, patch_bucket_request=request)
def delete_bucket(self, bucket):
"""Delete a bucket.
:param bucket: bucket id or Bucket
:return: Bucket
"""
if isinstance(bucket, Bucket):
bucket_id = bucket.id
else:
bucket_id = bucket
return self._buckets_service.delete_buckets_id(bucket_id=bucket_id)
def find_bucket_by_id(self, id):
"""Find bucket by ID.
:param id: bucket ID
:return: Bucket
"""
return self._buckets_service.get_buckets_id(id)
def find_bucket_by_name(self, bucket_name):
"""Find bucket by name.
:param bucket_name: bucket name
:return: Bucket
"""
buckets = self._buckets_service.get_buckets(name=bucket_name)
if len(buckets.buckets) > 0:
return buckets.buckets[0]
else:
return None
def find_buckets(self, **kwargs):
"""List buckets.
:key int offset: Offset for pagination
:key int limit: Limit for pagination
:key str after: The last resource ID to seek from (not inclusive).
Use this instead of `offset`.
:key str org: The organization name.
:key str org_id: The organization ID.
:key str name: Only returns buckets with a specific name.
:return: Buckets
"""
return self._buckets_service.get_buckets(**kwargs)
def find_buckets_iter(self, **kwargs):
"""Iterate over all buckets with pagination.
:key str name: Only returns buckets with the specified name
:key str org: The organization name.
:key str org_id: The organization ID.
:key str after: The last resource ID to seek from (not inclusive).
:key int limit: the maximum number of buckets in one page
:return: Buckets iterator
"""
return _Paginated(self._buckets_service.get_buckets, lambda response: response.buckets).find_iter(**kwargs)
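if __name__ == '__main__':
    # A minimal usage sketch of the API above: create a bucket with a one hour
    # retention rule, look it up by name, then delete it. The URL, token, org
    # and bucket name are placeholders.
    from influxdb_client import InfluxDBClient, BucketRetentionRules

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        buckets_api = client.buckets_api()
        retention = BucketRetentionRules(type="expire", every_seconds=3600)
        buckets_api.create_bucket(bucket_name="example-bucket", retention_rules=retention)
        found = buckets_api.find_bucket_by_name("example-bucket")
        print(found)
        buckets_api.delete_bucket(found)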

View File

@@ -0,0 +1,35 @@
"""Delete time series data from InfluxDB."""
from datetime import datetime
from typing import Union
from influxdb_client import Organization
from influxdb_client.client._base import _BaseDeleteApi
from influxdb_client.client.util.helpers import get_org_query_param
class DeleteApi(_BaseDeleteApi):
"""Implementation for '/api/v2/delete' endpoint."""
def __init__(self, influxdb_client):
"""Initialize defaults."""
super().__init__(influxdb_client)
def delete(self, start: Union[str, datetime], stop: Union[str, datetime], predicate: str, bucket: str,
org: Union[str, Organization, None] = None) -> None:
"""
Delete time series data from InfluxDB.
:param str, datetime.datetime start: start time
:param str, datetime.datetime stop: stop time
:param str predicate: predicate
:param str bucket: bucket id or name from which data will be deleted
:param str, Organization org: specifies the organization to delete data from;
accepts the ``ID``, ``Name`` or ``Organization`` object.
If not specified, the default value from ``InfluxDBClient.org`` is used.
:return:
"""
predicate_request = self._prepare_predicate_request(start, stop, predicate)
org_param = get_org_query_param(org=org, client=self._influxdb_client, required_id=False)
return self._service.post_delete(delete_predicate_request=predicate_request, bucket=bucket, org=org_param)
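if __name__ == '__main__':
    # A minimal usage sketch of the API above: delete all points of one
    # measurement within a time range. The URL, token, org, bucket and
    # measurement name are placeholders.
    from influxdb_client import InfluxDBClient

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        client.delete_api().delete(start="2022-01-01T00:00:00Z",
                                   stop="2022-02-01T00:00:00Z",
                                   predicate='_measurement="my_measurement"',
                                   bucket="my-bucket")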

View File

@@ -0,0 +1,37 @@
"""Delete time series data from InfluxDB."""
from datetime import datetime
from typing import Union
from influxdb_client import Organization
from influxdb_client.client._base import _BaseDeleteApi
from influxdb_client.client.util.helpers import get_org_query_param
class DeleteApiAsync(_BaseDeleteApi):
"""Async implementation for '/api/v2/delete' endpoint."""
def __init__(self, influxdb_client):
"""Initialize defaults."""
super().__init__(influxdb_client)
async def delete(self, start: Union[str, datetime], stop: Union[str, datetime], predicate: str, bucket: str,
org: Union[str, Organization, None] = None) -> bool:
"""
Delete time series data from InfluxDB.
:param str, datetime.datetime start: start time
:param str, datetime.datetime stop: stop time
:param str predicate: predicate
:param str bucket: bucket id or name from which data will be deleted
:param str, Organization org: specifies the organization to delete data from;
accepts the ``ID``, ``Name`` or ``Organization`` object.
If not specified, the default value from ``InfluxDBClientAsync.org`` is used.
:return: ``True`` for successfully deleted data, otherwise raise an exception
"""
predicate_request = self._prepare_predicate_request(start, stop, predicate)
org_param = get_org_query_param(org=org, client=self._influxdb_client, required_id=False)
response = await self._service.post_delete_async(delete_predicate_request=predicate_request, bucket=bucket,
org=org_param, _return_http_data_only=False)
return response[1] == 204
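if __name__ == '__main__':
    # A minimal usage sketch of the async variant above; the same placeholders
    # apply. The coroutine returns True when the server responds with 204.
    import asyncio
    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync

    async def main():
        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
            success = await client.delete_api().delete(start="2022-01-01T00:00:00Z",
                                                       stop="2022-02-01T00:00:00Z",
                                                       predicate='_measurement="my_measurement"',
                                                       bucket="my-bucket")
            print(success)

    asyncio.run(main())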

View File

@@ -0,0 +1,47 @@
"""Exceptions utils for InfluxDB."""
import logging
from urllib3 import HTTPResponse
logger = logging.getLogger('influxdb_client.client.exceptions')
class InfluxDBError(Exception):
"""Raised when a server error occurs."""
def __init__(self, response: HTTPResponse = None, message: str = None):
"""Initialize the InfluxDBError handler."""
if response is not None:
self.response = response
self.message = self._get_message(response)
if isinstance(response, HTTPResponse): # response is HTTPResponse
self.headers = response.headers
self.retry_after = response.headers.get('Retry-After')
else: # response is RESTResponse
self.headers = response.getheaders()
self.retry_after = response.getheader('Retry-After')
else:
self.response = None
self.message = message or 'no response'
self.retry_after = None
super().__init__(self.message)
def _get_message(self, response):
# Body
if response.data:
import json
try:
return json.loads(response.data)["message"]
except Exception as e:
logger.debug(f"Cannot parse error response to JSON: {response.data}, {e}")
return response.data
# Header
for header_key in ["X-Platform-Error-Code", "X-Influx-Error", "X-InfluxDb-Error"]:
header_value = response.getheader(header_key)
if header_value is not None:
return header_value
# Http Status
return response.reason
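if __name__ == '__main__':
    # A minimal usage sketch of the exception above: inspect the parsed server
    # message and the Retry-After header of a failed write. The URL, token,
    # org and bucket are placeholders.
    from influxdb_client import InfluxDBClient
    from influxdb_client.client.write_api import SYNCHRONOUS

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        try:
            client.write_api(write_options=SYNCHRONOUS).write(bucket="my-bucket",
                                                              record="mem,host=h1 used_percent=23.4")
        except InfluxDBError as error:
            print(error.message, error.retry_after)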

View File

@@ -0,0 +1,404 @@
"""Parsing response from InfluxDB to FluxStructures or DataFrame."""
import base64
import codecs
import csv as csv_parser
import warnings
from enum import Enum
from typing import List
from influxdb_client.client.flux_table import FluxTable, FluxColumn, FluxRecord, TableList
from influxdb_client.client.util.date_utils import get_date_helper
from influxdb_client.rest import _UTF_8_encoding
ANNOTATION_DEFAULT = "#default"
ANNOTATION_GROUP = "#group"
ANNOTATION_DATATYPE = "#datatype"
ANNOTATIONS = [ANNOTATION_DEFAULT, ANNOTATION_GROUP, ANNOTATION_DATATYPE]
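# The parser below consumes Flux "annotated CSV". As a reference, a minimal,
# hand-written response (illustrative only, not taken from this repository)
# looks like:
#
#   #datatype,string,long,dateTime:RFC3339,double,string
#   #group,false,false,false,false,true
#   #default,_result,,,,
#   ,result,table,_time,_value,_measurement
#   ,,0,2022-06-16T08:00:00Z,24.3,mem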
class FluxQueryException(Exception):
"""The exception from InfluxDB."""
def __init__(self, message, reference) -> None:
"""Initialize defaults."""
self.message = message
self.reference = reference
class FluxCsvParserException(Exception):
"""The exception for not parsable data."""
pass
class FluxSerializationMode(Enum):
"""The type how we want to serialize data."""
tables = 1
stream = 2
dataFrame = 3
class FluxResponseMetadataMode(Enum):
"""The configuration for expected amount of metadata response from InfluxDB."""
full = 1
# useful for Invokable scripts
only_names = 2
class _FluxCsvParserMetadata(object):
def __init__(self):
self.table_index = 0
self.table_id = -1
self.start_new_table = False
self.table = None
self.groups = []
self.parsing_state_error = False
class FluxCsvParser(object):
"""Parse to processing response from InfluxDB to FluxStructures or DataFrame."""
def __init__(self, response, serialization_mode: FluxSerializationMode,
data_frame_index: List[str] = None, query_options=None,
response_metadata_mode: FluxResponseMetadataMode = FluxResponseMetadataMode.full,
use_extension_dtypes=False) -> None:
"""
Initialize defaults.
:param response: HTTP response from a HTTP client.
Acceptable types: `urllib3.response.HTTPResponse`, `aiohttp.client_reqrep.ClientResponse`.
"""
self._response = response
self.tables = TableList()
self._serialization_mode = serialization_mode
self._response_metadata_mode = response_metadata_mode
self._use_extension_dtypes = use_extension_dtypes
self._data_frame_index = data_frame_index
self._data_frame_values = []
self._profilers = query_options.profilers if query_options is not None else None
self._profiler_callback = query_options.profiler_callback if query_options is not None else None
self._async_mode = 'ClientResponse' in type(response).__name__
def _close(self):
self._response.close()
def __enter__(self):
"""Initialize CSV reader."""
# response can be exhausted by logger, so we have to use data that has already been read
if hasattr(self._response, 'closed') and self._response.closed:
from io import StringIO
self._reader = csv_parser.reader(StringIO(self._response.data.decode(_UTF_8_encoding)))
else:
self._reader = csv_parser.reader(codecs.iterdecode(self._response, _UTF_8_encoding))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Close HTTP response."""
self._close()
async def __aenter__(self) -> 'FluxCsvParser':
"""Initialize CSV reader."""
from aiocsv import AsyncReader
self._reader = AsyncReader(_StreamReaderToWithAsyncRead(self._response.content))
return self
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
"""Shutdown the client."""
self.__exit__(exc_type, exc_val, exc_tb)
def generator(self):
"""Return Python generator."""
with self as parser:
for val in parser._parse_flux_response():
yield val
def generator_async(self):
"""Return Python async-generator."""
return self._parse_flux_response_async()
def _parse_flux_response(self):
metadata = _FluxCsvParserMetadata()
for csv in self._reader:
for val in self._parse_flux_response_row(metadata, csv):
yield val
# Return latest DataFrame
if (self._serialization_mode is FluxSerializationMode.dataFrame) and hasattr(self, '_data_frame'):
df = self._prepare_data_frame()
if not self._is_profiler_table(metadata.table):
yield df
async def _parse_flux_response_async(self):
metadata = _FluxCsvParserMetadata()
try:
async for csv in self._reader:
for val in self._parse_flux_response_row(metadata, csv):
yield val
# Return latest DataFrame
if (self._serialization_mode is FluxSerializationMode.dataFrame) and hasattr(self, '_data_frame'):
df = self._prepare_data_frame()
if not self._is_profiler_table(metadata.table):
yield df
finally:
self._close()
def _parse_flux_response_row(self, metadata, csv):
if len(csv) < 1:
# Skip empty line in results (new line is used as a delimiter between tables or table and error)
pass
elif "error" == csv[1] and "reference" == csv[2]:
metadata.parsing_state_error = True
else:
# Throw InfluxException with error response
if metadata.parsing_state_error:
error = csv[1]
reference_value = csv[2]
raise FluxQueryException(error, reference_value)
token = csv[0]
# start new table
if (token in ANNOTATIONS and not metadata.start_new_table) or \
(self._response_metadata_mode is FluxResponseMetadataMode.only_names and not metadata.table):
# Return already parsed DataFrame
if (self._serialization_mode is FluxSerializationMode.dataFrame) and hasattr(self, '_data_frame'):
df = self._prepare_data_frame()
if not self._is_profiler_table(metadata.table):
yield df
metadata.start_new_table = True
metadata.table = FluxTable()
self._insert_table(metadata.table, metadata.table_index)
metadata.table_index = metadata.table_index + 1
metadata.table_id = -1
elif metadata.table is None:
raise FluxCsvParserException("Unable to parse CSV response. FluxTable definition was not found.")
# #datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string
if ANNOTATION_DATATYPE == token:
self.add_data_types(metadata.table, csv)
elif ANNOTATION_GROUP == token:
metadata.groups = csv
elif ANNOTATION_DEFAULT == token:
self.add_default_empty_values(metadata.table, csv)
else:
# parse column names
if metadata.start_new_table:
# Invokable scripts don't support dialects => all columns are strings
if not metadata.table.columns and \
self._response_metadata_mode is FluxResponseMetadataMode.only_names:
self.add_data_types(metadata.table, list(map(lambda column: 'string', csv)))
metadata.groups = list(map(lambda column: 'false', csv))
self.add_groups(metadata.table, metadata.groups)
self.add_column_names_and_tags(metadata.table, csv)
metadata.start_new_table = False
# Create DataFrame with default values
if self._serialization_mode is FluxSerializationMode.dataFrame:
from ..extras import pd
labels = list(map(lambda it: it.label, metadata.table.columns))
self._data_frame = pd.DataFrame(data=[], columns=labels, index=None)
pass
else:
# TODO: int conversions
current_id = int(csv[2])
if metadata.table_id == -1:
metadata.table_id = current_id
if metadata.table_id != current_id:
# create new table with previous column headers settings
flux_columns = metadata.table.columns
metadata.table = FluxTable()
metadata.table.columns.extend(flux_columns)
self._insert_table(metadata.table, metadata.table_index)
metadata.table_index = metadata.table_index + 1
metadata.table_id = current_id
flux_record = self.parse_record(metadata.table_index - 1, metadata.table, csv)
if self._is_profiler_record(flux_record):
self._print_profiler_info(flux_record)
else:
if self._serialization_mode is FluxSerializationMode.tables:
self.tables[metadata.table_index - 1].records.append(flux_record)
if self._serialization_mode is FluxSerializationMode.stream:
yield flux_record
if self._serialization_mode is FluxSerializationMode.dataFrame:
self._data_frame_values.append(flux_record.values)
pass
def _prepare_data_frame(self):
from ..extras import pd
# We have to create a temporary DataFrame because we want to preserve default column values
_temp_df = pd.DataFrame(self._data_frame_values)
self._data_frame_values = []
# Custom DataFrame index
if self._data_frame_index:
self._data_frame = self._data_frame.set_index(self._data_frame_index)
_temp_df = _temp_df.set_index(self._data_frame_index)
# Append data
df = pd.concat([self._data_frame.astype(_temp_df.dtypes), _temp_df])
if self._use_extension_dtypes:
return df.convert_dtypes()
return df
def parse_record(self, table_index, table, csv):
"""Parse one record."""
record = FluxRecord(table_index)
for fluxColumn in table.columns:
column_name = fluxColumn.label
str_val = csv[fluxColumn.index + 1]
record.values[column_name] = self._to_value(str_val, fluxColumn)
record.row.append(record.values[column_name])
return record
def _to_value(self, str_val, column):
if str_val == '' or str_val is None:
default_value = column.default_value
if default_value == '' or default_value is None:
if self._serialization_mode is FluxSerializationMode.dataFrame:
if self._use_extension_dtypes:
from ..extras import pd
return pd.NA
return None
return None
return self._to_value(default_value, column)
if "string" == column.data_type:
return str_val
if "boolean" == column.data_type:
return "true" == str_val
if "unsignedLong" == column.data_type or "long" == column.data_type:
return int(str_val)
if "double" == column.data_type:
return float(str_val)
if "base64Binary" == column.data_type:
return base64.b64decode(str_val)
if "dateTime:RFC3339" == column.data_type or "dateTime:RFC3339Nano" == column.data_type:
return get_date_helper().parse_date(str_val)
if "duration" == column.data_type:
# TODO: use a better type?
return int(str_val)
@staticmethod
def add_data_types(table, data_types):
"""Add data types to columns."""
for index in range(1, len(data_types)):
column_def = FluxColumn(index=index - 1, data_type=data_types[index])
table.columns.append(column_def)
@staticmethod
def add_groups(table, csv):
"""Add group keys to columns."""
i = 1
for column in table.columns:
column.group = csv[i] == "true"
i += 1
@staticmethod
def add_default_empty_values(table, default_values):
"""Add default values to columns."""
i = 1
for column in table.columns:
column.default_value = default_values[i]
i += 1
@staticmethod
def add_column_names_and_tags(table, csv):
"""Add labels to columns."""
if len(csv) != len(set(csv)):
message = f"""The response contains columns with duplicated names: '{csv}'.
You should use the 'record.row' to access your data instead of the 'record.values' dictionary.
"""
warnings.warn(message, UserWarning)
print(message)
i = 1
for column in table.columns:
column.label = csv[i]
i += 1
def _insert_table(self, table, table_index):
if self._serialization_mode is FluxSerializationMode.tables:
self.tables.insert(table_index, table)
def _is_profiler_record(self, flux_record: FluxRecord) -> bool:
if not self._profilers:
return False
for profiler in self._profilers:
if "_measurement" in flux_record.values and flux_record["_measurement"] == "profiler/" + profiler:
return True
return False
def _is_profiler_table(self, table: FluxTable) -> bool:
if not self._profilers:
return False
return any(filter(lambda column: (column.default_value == "_profiler" and column.label == "result"),
table.columns))
def table_list(self) -> TableList:
"""Get the list of flux tables."""
if not self._profilers:
return self.tables
else:
return TableList(filter(lambda table: not self._is_profiler_table(table), self.tables))
def _print_profiler_info(self, flux_record: FluxRecord):
if flux_record.get_measurement().startswith("profiler/"):
if self._profiler_callback:
self._profiler_callback(flux_record)
else:
msg = "Profiler: " + flux_record.get_measurement()
print("\n" + len(msg) * "=")
print(msg)
print(len(msg) * "=")
for name in flux_record.values:
val = flux_record[name]
if isinstance(val, str) and len(val) > 50:
print(f"{name:<20}: \n\n{val}")
elif val is not None:
print(f"{name:<20}: {val:<20}")
class _StreamReaderToWithAsyncRead:
def __init__(self, response):
self.response = response
self.decoder = codecs.getincrementaldecoder(_UTF_8_encoding)()
async def read(self, size: int) -> str:
raw_bytes = (await self.response.read(size))
if not raw_bytes:
return self.decoder.decode(b'', final=True)
return self.decoder.decode(raw_bytes, final=False)
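if __name__ == '__main__':
    # A minimal usage sketch: this parser is normally driven through the Query
    # API; 'stream' serialization yields FluxRecord objects one by one. The
    # URL, token, org and bucket are placeholders.
    from influxdb_client import InfluxDBClient

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        records = client.query_api().query_stream('from(bucket:"my-bucket") |> range(start: -10m)')
        for record in records:
            print(record.get_time(), record.get_field(), record.get_value())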

View File

@@ -0,0 +1,290 @@
"""
Flux employs a basic data model built from basic data types.
The data model consists of tables, records, columns.
"""
import codecs
import csv
from http.client import HTTPResponse
from json import JSONEncoder
from typing import List, Iterator
from influxdb_client.rest import _UTF_8_encoding
class FluxStructure:
"""The data model consists of tables, records, columns."""
pass
class FluxStructureEncoder(JSONEncoder):
"""The FluxStructure encoder to encode query results to JSON."""
def default(self, obj):
"""Return serializable objects for JSONEncoder."""
import datetime
if isinstance(obj, FluxStructure):
return obj.__dict__
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
return super().default(obj)
class FluxTable(FluxStructure):
"""
A table is a set of records with a common set of columns and a group key.
The table can be serialized into JSON by::
import json
from influxdb_client.client.flux_table import FluxStructureEncoder
output = json.dumps(tables, cls=FluxStructureEncoder, indent=2)
print(output)
"""
def __init__(self) -> None:
"""Initialize defaults."""
self.columns = []
self.records = []
def get_group_key(self):
"""
Group key is a list of columns.
A table's group key denotes which subset of the entire dataset is assigned to the table.
"""
return list(filter(lambda column: (column.group is True), self.columns))
def __str__(self):
"""Return formatted output."""
cls_name = type(self).__name__
return cls_name + "() columns: " + str(len(self.columns)) + ", records: " + str(len(self.records))
def __repr__(self):
"""Format for inspection."""
return f"<{type(self).__name__}: {len(self.columns)} columns, {len(self.records)} records>"
def __iter__(self):
"""Iterate over records."""
return iter(self.records)
class FluxColumn(FluxStructure):
"""A column has a label and a data type."""
def __init__(self, index=None, label=None, data_type=None, group=None, default_value=None) -> None:
"""Initialize defaults."""
self.default_value = default_value
self.group = group
self.data_type = data_type
self.label = label
self.index = index
def __repr__(self):
"""Format for inspection."""
fields = [repr(self.index)] + [
f'{name}={getattr(self, name)!r}' for name in (
'label', 'data_type', 'group', 'default_value'
) if getattr(self, name) is not None
]
return f"{type(self).__name__}({', '.join(fields)})"
class FluxRecord(FluxStructure):
"""A record is a tuple of named values and is represented using an object type."""
def __init__(self, table, values=None) -> None:
"""Initialize defaults."""
if values is None:
values = {}
self.table = table
self.values = values
self.row = []
def get_start(self):
"""Get '_start' value."""
return self["_start"]
def get_stop(self):
"""Get '_stop' value."""
return self["_stop"]
def get_time(self):
"""Get timestamp."""
return self["_time"]
def get_value(self):
"""Get field value."""
return self["_value"]
def get_field(self):
"""Get field name."""
return self["_field"]
def get_measurement(self):
"""Get measurement name."""
return self["_measurement"]
def __getitem__(self, key):
"""Get value by key."""
return self.values.__getitem__(key)
def __setitem__(self, key, value):
"""Set value with key and value."""
return self.values.__setitem__(key, value)
def __str__(self):
"""Return formatted output."""
cls_name = type(self).__name__
return cls_name + "() table: " + str(self.table) + ", " + str(self.values)
def __repr__(self):
"""Format for inspection."""
return f"<{type(self).__name__}: field={self.values.get('_field')}, value={self.values.get('_value')}>"
class TableList(List[FluxTable]):
""":class:`~influxdb_client.client.flux_table.FluxTable` list with additionally functional to better handle of query result.""" # noqa: E501
def to_values(self, columns: List['str'] = None) -> List[List[object]]:
"""
Serialize query results to a flattened list of values.
:param columns: if not ``None`` then only specified columns are presented in results
:return: :class:`~list` of values
Output example:
.. code-block:: python
[
['New York', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 24.3],
['Prague', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 25.3],
...
]
Configure required columns:
.. code-block:: python
from influxdb_client import InfluxDBClient
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
# Query: using Table structure
tables = client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
# Serialize to values
output = tables.to_values(columns=['location', '_time', '_value'])
print(output)
"""
def filter_values(record):
if columns is not None:
return [record.values.get(k) for k in columns]
return record.values.values()
return self._to_values(filter_values)
def to_json(self, columns: List['str'] = None, **kwargs) -> str:
"""
Serialize query results to a JSON formatted :class:`~str`.
:param columns: if not ``None`` then only specified columns are presented in results
:return: :class:`~str`
The query results are flattened into an array:
.. code-block:: javascript
[
{
"_measurement": "mem",
"_start": "2021-06-23T06:50:11.897825+00:00",
"_stop": "2021-06-25T06:50:11.897825+00:00",
"_time": "2020-02-27T16:20:00.897825+00:00",
"region": "north",
"_field": "usage",
"_value": 15
},
{
"_measurement": "mem",
"_start": "2021-06-23T06:50:11.897825+00:00",
"_stop": "2021-06-25T06:50:11.897825+00:00",
"_time": "2020-02-27T16:20:01.897825+00:00",
"region": "west",
"_field": "usage",
"_value": 10
},
...
]
The JSON format can be configured via ``**kwargs`` arguments:
.. code-block:: python
from influxdb_client import InfluxDBClient
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
# Query: using Table structure
tables = client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
# Serialize to JSON
output = tables.to_json(indent=5)
print(output)
For all available options see - `json.dump <https://docs.python.org/3/library/json.html#json.dump>`_.
"""
if 'indent' not in kwargs:
kwargs['indent'] = 2
def filter_values(record):
if columns is not None:
return {k: v for (k, v) in record.values.items() if k in columns}
return record.values
import json
return json.dumps(self._to_values(filter_values), cls=FluxStructureEncoder, **kwargs)
def _to_values(self, mapping):
return [mapping(record) for table in self for record in table.records]
class CSVIterator(Iterator[List[str]]):
""":class:`Iterator[List[str]]` with additionally functional to better handle of query result."""
def __init__(self, response: HTTPResponse) -> None:
"""Initialize ``csv.reader``."""
self.delegate = csv.reader(codecs.iterdecode(response, _UTF_8_encoding))
def __iter__(self):
"""Return an iterator object."""
return self
def __next__(self):
"""Retrieve the next item from the iterator."""
row = self.delegate.__next__()
while not row:
row = self.delegate.__next__()
return row
def to_values(self) -> List[List[str]]:
"""
Serialize query results to a flattened list of values.
:return: :class:`~list` of values
Output example:
.. code-block:: python
[
['New York', '2022-06-14T08:00:51.749072045Z', '24.3'],
['Prague', '2022-06-14T08:00:51.749072045Z', '25.3'],
...
]
"""
return list(self.__iter__())
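if __name__ == '__main__':
    # A minimal usage sketch of the structures above: serialize query results
    # with FluxStructureEncoder. The URL, token, org and query are placeholders.
    import json
    from influxdb_client import InfluxDBClient

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        tables = client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
        print(json.dumps(tables, cls=FluxStructureEncoder, indent=2))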

View File

@@ -0,0 +1,438 @@
"""InfluxDBClient is client for API defined in https://github.com/influxdata/influxdb/blob/master/http/swagger.yml."""
from __future__ import absolute_import
import logging
import warnings
from influxdb_client import HealthCheck, HealthService, Ready, ReadyService, PingService, \
InvokableScriptsApi
from influxdb_client.client._base import _BaseClient
from influxdb_client.client.authorizations_api import AuthorizationsApi
from influxdb_client.client.bucket_api import BucketsApi
from influxdb_client.client.delete_api import DeleteApi
from influxdb_client.client.labels_api import LabelsApi
from influxdb_client.client.organizations_api import OrganizationsApi
from influxdb_client.client.query_api import QueryApi, QueryOptions
from influxdb_client.client.tasks_api import TasksApi
from influxdb_client.client.users_api import UsersApi
from influxdb_client.client.write_api import WriteApi, WriteOptions, PointSettings
logger = logging.getLogger('influxdb_client.client.influxdb_client')
class InfluxDBClient(_BaseClient):
"""InfluxDBClient is client for InfluxDB v2."""
def __init__(self, url, token: str = None, debug=None, timeout=10_000, enable_gzip=False, org: str = None,
default_tags: dict = None, **kwargs) -> None:
"""
Initialize defaults.
:param url: InfluxDB server API url (ex. http://localhost:8086).
:param token: ``token`` to authenticate to the InfluxDB API
:param debug: enable verbose logging of http requests
:param timeout: HTTP client timeout setting for a request specified in milliseconds.
If a single number is provided, it is used as the total request timeout.
It can also be a pair (tuple) of (connection, read) timeouts.
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
support Gzip compression.
:param org: organization name (used as a default in Query, Write and Delete API)
:key bool verify_ssl: Set this to false to skip verifying SSL certificate when calling API from https server.
:key str ssl_ca_cert: Set this to customize the certificate file to verify the peer.
:key str cert_file: Path to the certificate that will be used for mTLS authentication.
:key str cert_key_file: Path to the file containing the private key for the mTLS certificate.
:key str cert_key_password: String or function which returns password for decrypting the mTLS private key.
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
Be aware that either certificate/key files or an SSL Context can
be supplied, not both.
:key str proxy: Set this to configure the http proxy to be used (ex. http://localhost:3128)
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
authentication.
:key int connection_pool_maxsize: Number of connections to save that can be reused by urllib3.
Defaults to "multiprocessing.cpu_count() * 5".
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
except batching writes. By default, no retry strategy is used.
:key bool auth_basic: Set this to true to enable basic authentication when talking to an InfluxDB 1.8.x that
does not have authentication enabled but is protected by a reverse proxy with basic authentication.
(defaults to false, don't set to true when talking to InfluxDB 2)
:key str username: ``username`` to authenticate via username and password credentials to the InfluxDB 2.x
:key str password: ``password`` to authenticate via username and password credentials to the InfluxDB 2.x
:key list[str] profilers: list of enabled Flux profilers
"""
super().__init__(url=url, token=token, debug=debug, timeout=timeout, enable_gzip=enable_gzip, org=org,
default_tags=default_tags, http_client_logger="urllib3", **kwargs)
from .._sync.api_client import ApiClient
self.api_client = ApiClient(configuration=self.conf, header_name=self.auth_header_name,
header_value=self.auth_header_value, retries=self.retries)
def __enter__(self):
"""
Enter the runtime context related to this object.
It will bind this method's return value to the target(s)
specified in the `as` clause of the statement.
return: self instance
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Exit the runtime context related to this object and close the client."""
self.close()
@classmethod
def from_config_file(cls, config_file: str = "config.ini", debug=None, enable_gzip=False, **kwargs):
"""
Configure client via configuration file. The configuration has to be under the 'influx2' section.
:param config_file: Path to configuration file
:param debug: Enable verbose logging of http requests
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
support Gzip compression.
:key config_name: Name of the configuration section of the configuration file
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
authentication.
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
except batching writes. By default, no retry strategy is used.
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
Be aware that either certificate/key files or an SSL Context can
be supplied, not both.
The supported formats:
- https://docs.python.org/3/library/configparser.html
- https://toml.io/en/
- https://www.json.org/json-en.html
Configuration options:
- url
- org
- token
- timeout
- verify_ssl
- ssl_ca_cert
- cert_file
- cert_key_file
- cert_key_password
- connection_pool_maxsize
- auth_basic
- profilers
- proxy
config.ini example::
[influx2]
url=http://localhost:8086
org=my-org
token=my-token
timeout=6000
connection_pool_maxsize=25
auth_basic=false
profilers=query,operator
proxy=http://proxy.domain.org:8080
[tags]
id = 132-987-655
customer = California Miner
data_center = ${env.data_center}
config.toml example::
[influx2]
url = "http://localhost:8086"
token = "my-token"
org = "my-org"
timeout = 6000
connection_pool_maxsize = 25
auth_basic = false
profilers="query, operator"
proxy = "http://proxy.domain.org:8080"
[tags]
id = "132-987-655"
customer = "California Miner"
data_center = "${env.data_center}"
config.json example::
{
"url": "http://localhost:8086",
"token": "my-token",
"org": "my-org",
"active": true,
"timeout": 6000,
"connection_pool_maxsize": 55,
"auth_basic": false,
"profilers": "query, operator",
"tags": {
"id": "132-987-655",
"customer": "California Miner",
"data_center": "${env.data_center}"
}
}
"""
return InfluxDBClient._from_config_file(config_file=config_file, debug=debug, enable_gzip=enable_gzip, **kwargs)
@classmethod
def from_env_properties(cls, debug=None, enable_gzip=False, **kwargs):
"""
Configure client via environment properties.
:param debug: Enable verbose logging of http requests
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
support Gzip compression.
:key str proxy: Set this to configure the http proxy to be used (ex. http://localhost:3128)
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
authentication.
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
except batching writes. By default, no retry strategy is used.
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
Be aware that either certificate/key files or an SSL Context can
be supplied, not both.
Supported environment properties:
- INFLUXDB_V2_URL
- INFLUXDB_V2_ORG
- INFLUXDB_V2_TOKEN
- INFLUXDB_V2_TIMEOUT
- INFLUXDB_V2_VERIFY_SSL
- INFLUXDB_V2_SSL_CA_CERT
- INFLUXDB_V2_CERT_FILE
- INFLUXDB_V2_CERT_KEY_FILE
- INFLUXDB_V2_CERT_KEY_PASSWORD
- INFLUXDB_V2_CONNECTION_POOL_MAXSIZE
- INFLUXDB_V2_AUTH_BASIC
- INFLUXDB_V2_PROFILERS
- INFLUXDB_V2_TAG
"""
return InfluxDBClient._from_env_properties(debug=debug, enable_gzip=enable_gzip, **kwargs)
def write_api(self, write_options=WriteOptions(), point_settings=PointSettings(), **kwargs) -> WriteApi:
"""
Create Write API instance.
Example:
.. code-block:: python
from influxdb_client import InfluxDBClient
from influxdb_client.client.write_api import SYNCHRONOUS
# Initialize SYNCHRONOUS instance of WriteApi
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
write_api = client.write_api(write_options=SYNCHRONOUS)
If you would like to use a **background batching**, you have to configure client like this:
.. code-block:: python
from influxdb_client import InfluxDBClient
# Initialize background batching instance of WriteApi
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
with client.write_api() as write_api:
pass
There is also possibility to use callbacks to notify about state of background batches:
.. code-block:: python
from influxdb_client import InfluxDBClient
from influxdb_client.client.exceptions import InfluxDBError
class BatchingCallback(object):
def success(self, conf: (str, str, str), data: str):
print(f"Written batch: {conf}, data: {data}")
def error(self, conf: (str, str, str), data: str, exception: InfluxDBError):
print(f"Cannot write batch: {conf}, data: {data} due: {exception}")
def retry(self, conf: (str, str, str), data: str, exception: InfluxDBError):
print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
callback = BatchingCallback()
with client.write_api(success_callback=callback.success,
error_callback=callback.error,
retry_callback=callback.retry) as write_api:
pass
:param write_options: Write API configuration
:param point_settings: settings to store default tags
:key success_callback: The callable ``callback`` to run after a batch has been successfully written.
The callable must accept two arguments:
- `Tuple`: ``(bucket, organization, precision)``
- `str`: written data
**[batching mode]**
:key error_callback: The callable ``callback`` to run after a batch has failed to be written.
The callable must accept three arguments:
- `Tuple`: ``(bucket, organization, precision)``
- `str`: written data
- `Exception`: an occurred error
**[batching mode]**
:key retry_callback: The callable ``callback`` to run after retryable error occurred.
The callable must accept three arguments:
- `Tuple`: ``(bucket, organization, precision)``
- `str`: written data
- `Exception`: a retryable error
**[batching mode]**
:return: write api instance
"""
return WriteApi(influxdb_client=self, write_options=write_options, point_settings=point_settings, **kwargs)
def query_api(self, query_options: QueryOptions = QueryOptions()) -> QueryApi:
"""
Create an Query API instance.
:param query_options: optional query api configuration
:return: Query api instance
"""
return QueryApi(self, query_options)
def invokable_scripts_api(self) -> InvokableScriptsApi:
"""
Create an InvokableScripts API instance.
:return: InvokableScripts API instance
"""
return InvokableScriptsApi(self)
def close(self):
"""Shutdown the client."""
self.__del__()
def __del__(self):
"""Shutdown the client."""
if self.api_client:
self.api_client.__del__()
self.api_client = None
def buckets_api(self) -> BucketsApi:
"""
Create the Bucket API instance.
:return: buckets api
"""
return BucketsApi(self)
def authorizations_api(self) -> AuthorizationsApi:
"""
Create the Authorizations API instance.
:return: authorizations api
"""
return AuthorizationsApi(self)
def users_api(self) -> UsersApi:
"""
Create the Users API instance.
:return: users api
"""
return UsersApi(self)
def organizations_api(self) -> OrganizationsApi:
"""
Create the Organizations API instance.
:return: organizations api
"""
return OrganizationsApi(self)
def tasks_api(self) -> TasksApi:
"""
Create the Tasks API instance.
:return: tasks api
"""
return TasksApi(self)
def labels_api(self) -> LabelsApi:
"""
Create the Labels API instance.
:return: labels api
"""
return LabelsApi(self)
def health(self) -> HealthCheck:
"""
Get the health of an instance.
:return: HealthCheck
"""
warnings.warn("This method is deprecated. Call 'ping()' instead.", DeprecationWarning)
health_service = HealthService(self.api_client)
try:
health = health_service.get_health()
return health
except Exception as e:
return HealthCheck(name="influxdb", message=str(e), status="fail")
def ping(self) -> bool:
"""
Return the status of the InfluxDB instance.
:return: The status of InfluxDB.
"""
ping_service = PingService(self.api_client)
try:
ping_service.get_ping()
return True
except Exception as ex:
logger.debug("Unexpected error during /ping: %s", ex)
return False
def version(self) -> str:
"""
Return the version of the connected InfluxDB Server.
:return: The version of InfluxDB.
"""
ping_service = PingService(self.api_client)
response = ping_service.get_ping_with_http_info(_return_http_data_only=False)
return ping_service.response_header(response)
def build(self) -> str:
"""
Return the build type of the connected InfluxDB Server.
:return: The type of InfluxDB build.
"""
ping_service = PingService(self.api_client)
return ping_service.build_type()
def ready(self) -> Ready:
"""
Get the readiness of the InfluxDB 2.0 instance.
:return: Ready
"""
ready_service = ReadyService(self.api_client)
return ready_service.get_ready()
def delete_api(self) -> DeleteApi:
"""
Get the Delete API instance.
:return: delete api
"""
return DeleteApi(self)
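if __name__ == '__main__':
    # A minimal usage sketch of the client above: write one point and query it
    # back. The URL, token, org and bucket are placeholders.
    from influxdb_client import Point
    from influxdb_client.client.write_api import SYNCHRONOUS

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        point = Point("mem").tag("host", "h1").field("used_percent", 23.4)
        client.write_api(write_options=SYNCHRONOUS).write(bucket="my-bucket", record=point)
        for table in client.query_api().query('from(bucket:"my-bucket") |> range(start: -1m)'):
            for record in table.records:
                print(record.get_field(), record.get_value())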

View File

@@ -0,0 +1,301 @@
"""InfluxDBClientAsync is client for API defined in https://github.com/influxdata/openapi/blob/master/contracts/oss.yml.""" # noqa: E501
import logging
import sys
from influxdb_client import PingService
from influxdb_client.client._base import _BaseClient
from influxdb_client.client.delete_api_async import DeleteApiAsync
from influxdb_client.client.query_api import QueryOptions
from influxdb_client.client.query_api_async import QueryApiAsync
from influxdb_client.client.write_api import PointSettings
from influxdb_client.client.write_api_async import WriteApiAsync
logger = logging.getLogger('influxdb_client.client.influxdb_client_async')
class InfluxDBClientAsync(_BaseClient):
"""InfluxDBClientAsync is client for InfluxDB v2."""
def __init__(self, url, token: str = None, org: str = None, debug=None, timeout=10_000, enable_gzip=False,
**kwargs) -> None:
"""
Initialize defaults.
:param url: InfluxDB server API url (ex. http://localhost:8086).
:param token: ``token`` to authenticate to the InfluxDB 2.x
:param org: organization name (used as a default in Query, Write and Delete API)
:param debug: enable verbose logging of http requests
:param timeout: The maximal number of milliseconds for the whole HTTP request including
connection establishment, request sending and response reading.
It can also be a :class:`~aiohttp.ClientTimeout` which is passed directly to ``aiohttp``.
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
support Gzip compression.
:key bool verify_ssl: Set this to false to skip verifying SSL certificate when calling API from https server.
:key str ssl_ca_cert: Set this to customize the certificate file to verify the peer.
:key str cert_file: Path to the certificate that will be used for mTLS authentication.
:key str cert_key_file: Path to the file contains private key for mTLS certificate.
:key str cert_key_password: String or function which returns password for decrypting the mTLS private key.
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
Be aware that either certificate/key files or an SSL Context can
be supplied, not both.
:key str proxy: Set this to configure the http proxy to be used (ex. http://localhost:3128)
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
authentication.
:key int connection_pool_maxsize: The total number of simultaneous connections.
Defaults to "multiprocessing.cpu_count() * 5".
:key bool auth_basic: Set this to true to enable basic authentication when talking to an InfluxDB 1.8.x that
does not have authentication enabled but is protected by a reverse proxy with basic authentication.
(defaults to false, don't set to true when talking to InfluxDB 2)
:key str username: ``username`` to authenticate via username and password credentials to the InfluxDB 2.x
:key str password: ``password`` to authenticate via username and password credentials to the InfluxDB 2.x
:key bool allow_redirects: If set to ``False``, do not follow HTTP redirects. ``True`` by default.
:key int max_redirects: Maximum number of HTTP redirects to follow. ``10`` by default.
:key dict client_session_kwargs: Additional configuration arguments for :class:`~aiohttp.ClientSession`
:key type client_session_type: Type of aiohttp client to use. Useful for third party wrappers like
``aiohttp-retry``. :class:`~aiohttp.ClientSession` by default.
:key list[str] profilers: list of enabled Flux profilers
"""
super().__init__(url=url, token=token, org=org, debug=debug, timeout=timeout, enable_gzip=enable_gzip,
http_client_logger="aiohttp.client", **kwargs)
# compatibility with Python 3.6
if sys.version_info[:2] >= (3, 7):
from asyncio import get_running_loop
else:
from asyncio import _get_running_loop as get_running_loop
# check present asynchronous context
try:
loop = get_running_loop()
# compatibility with Python 3.6
if loop is None:
raise RuntimeError('no running event loop')
except RuntimeError:
from influxdb_client.client.exceptions import InfluxDBError
message = "The async client should be initialised inside an async coroutine, " \
"otherwise there can be unexpected behaviour."
raise InfluxDBError(response=None, message=message)
from .._async.api_client import ApiClientAsync
self.api_client = ApiClientAsync(configuration=self.conf, header_name=self.auth_header_name,
header_value=self.auth_header_value, **kwargs)
async def __aenter__(self) -> 'InfluxDBClientAsync':
"""
Enter the runtime context related to this object.
return: self instance
"""
return self
async def __aexit__(self, exc_type, exc, tb) -> None:
"""Shutdown the client."""
await self.close()
async def close(self):
"""Shutdown the client."""
if self.api_client:
await self.api_client.close()
self.api_client = None
@classmethod
def from_config_file(cls, config_file: str = "config.ini", debug=None, enable_gzip=False, **kwargs):
"""
Configure client via configuration file. The configuration has to be under the 'influx2' section.
:param config_file: Path to configuration file
:param debug: Enable verbose logging of http requests
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
support Gzip compression.
:key config_name: Name of the configuration section of the configuration file
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
authentication.
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
except batching writes. By default, no retry strategy is used.
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
Be aware that either certificate/key files or an SSL Context can
be supplied, not both.
The supported formats:
- https://docs.python.org/3/library/configparser.html
- https://toml.io/en/
- https://www.json.org/json-en.html
Configuration options:
- url
- org
- token
- timeout
- verify_ssl
- ssl_ca_cert
- cert_file
- cert_key_file
- cert_key_password
- connection_pool_maxsize
- auth_basic
- profilers
- proxy
config.ini example::
[influx2]
url=http://localhost:8086
org=my-org
token=my-token
timeout=6000
connection_pool_maxsize=25
auth_basic=false
profilers=query,operator
proxy=http://proxy.domain.org:8080
[tags]
id = 132-987-655
customer = California Miner
data_center = ${env.data_center}
config.toml example::
[influx2]
url = "http://localhost:8086"
token = "my-token"
org = "my-org"
timeout = 6000
connection_pool_maxsize = 25
auth_basic = false
profilers="query, operator"
proxy = "http://proxy.domain.org:8080"
[tags]
id = "132-987-655"
customer = "California Miner"
data_center = "${env.data_center}"
config.json example::
{
"url": "http://localhost:8086",
"token": "my-token",
"org": "my-org",
"active": true,
"timeout": 6000,
"connection_pool_maxsize": 55,
"auth_basic": false,
"profilers": "query, operator",
"tags": {
"id": "132-987-655",
"customer": "California Miner",
"data_center": "${env.data_center}"
}
}
"""
return InfluxDBClientAsync._from_config_file(config_file=config_file, debug=debug,
enable_gzip=enable_gzip, **kwargs)
@classmethod
def from_env_properties(cls, debug=None, enable_gzip=False, **kwargs):
"""
Configure client via environment properties.
:param debug: Enable verbose logging of http requests
:param enable_gzip: Enable Gzip compression for http requests. Currently, only the "Write" and "Query" endpoints
support Gzip compression.
:key str proxy: Set this to configure the http proxy to be used (ex. http://localhost:3128)
:key str proxy_headers: A dictionary containing headers that will be sent to the proxy. Could be used for proxy
authentication.
:key urllib3.util.retry.Retry retries: Set the default retry strategy that is used for all HTTP requests
except batching writes. By default, no retry strategy is used.
:key ssl.SSLContext ssl_context: Specify a custom Python SSL Context for the TLS/ mTLS handshake.
Be aware that either certificate/key files or an SSL Context can
be supplied, not both.
Supported environment properties:
- INFLUXDB_V2_URL
- INFLUXDB_V2_ORG
- INFLUXDB_V2_TOKEN
- INFLUXDB_V2_TIMEOUT
- INFLUXDB_V2_VERIFY_SSL
- INFLUXDB_V2_SSL_CA_CERT
- INFLUXDB_V2_CERT_FILE
- INFLUXDB_V2_CERT_KEY_FILE
- INFLUXDB_V2_CERT_KEY_PASSWORD
- INFLUXDB_V2_CONNECTION_POOL_MAXSIZE
- INFLUXDB_V2_AUTH_BASIC
- INFLUXDB_V2_PROFILERS
- INFLUXDB_V2_TAG
"""
return InfluxDBClientAsync._from_env_properties(debug=debug, enable_gzip=enable_gzip, **kwargs)
async def ping(self) -> bool:
"""
Return the status of the InfluxDB instance.
:return: The status of InfluxDB.
"""
ping_service = PingService(self.api_client)
try:
await ping_service.get_ping_async()
return True
except Exception as ex:
logger.debug("Unexpected error during /ping: %s", ex)
raise ex
async def version(self) -> str:
"""
Return the version of the connected InfluxDB Server.
:return: The version of InfluxDB.
"""
ping_service = PingService(self.api_client)
response = await ping_service.get_ping_async(_return_http_data_only=False)
return ping_service.response_header(response)
async def build(self) -> str:
"""
Return the build type of the connected InfluxDB Server.
:return: The type of InfluxDB build.
"""
ping_service = PingService(self.api_client)
return await ping_service.build_type_async()
def query_api(self, query_options: QueryOptions = QueryOptions()) -> QueryApiAsync:
"""
Create an asynchronous Query API instance.
:param query_options: optional query api configuration
:return: Query api instance
"""
return QueryApiAsync(self, query_options)
def write_api(self, point_settings=PointSettings()) -> WriteApiAsync:
"""
Create an asynchronous Write API instance.
Example:
.. code-block:: python
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
# Initialize async/await instance of Write API
async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
write_api = client.write_api()
:param point_settings: settings to store default tags
:return: write api instance
"""
return WriteApiAsync(influxdb_client=self, point_settings=point_settings)
def delete_api(self) -> DeleteApiAsync:
"""
Get the asynchronous Delete API instance.
:return: delete api
"""
return DeleteApiAsync(self)
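if __name__ == '__main__':
    # A minimal usage sketch of the async client above: ping the server and
    # stream a query. The URL, token, org and bucket are placeholders.
    import asyncio

    async def main():
        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
            print(await client.ping())
            records = await client.query_api().query_stream('from(bucket:"my-bucket") |> range(start: -10m)')
            async for record in records:
                print(record)

    asyncio.run(main())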

View File

@@ -0,0 +1,293 @@
"""
Use API invokable scripts to create custom InfluxDB API endpoints that query, process, and shape data.
API invokable scripts let you assign scripts to API endpoints and then execute them as standard REST operations
in InfluxDB Cloud.
"""
from typing import List, Iterator, Generator, Any
from influxdb_client import Script, InvokableScriptsService, ScriptCreateRequest, ScriptUpdateRequest, \
ScriptInvocationParams
from influxdb_client.client._base import _BaseQueryApi
from influxdb_client.client.flux_csv_parser import FluxResponseMetadataMode
from influxdb_client.client.flux_table import FluxRecord, TableList, CSVIterator
class InvokableScriptsApi(_BaseQueryApi):
"""Use API invokable scripts to create custom InfluxDB API endpoints that query, process, and shape data."""
def __init__(self, influxdb_client):
"""Initialize defaults."""
self._influxdb_client = influxdb_client
self._invokable_scripts_service = InvokableScriptsService(influxdb_client.api_client)
def create_script(self, create_request: ScriptCreateRequest) -> Script:
"""Create a script.
:param ScriptCreateRequest create_request: The script to create. (required)
:return: The created script.
"""
return self._invokable_scripts_service.post_scripts(script_create_request=create_request)
def update_script(self, script_id: str, update_request: ScriptUpdateRequest) -> Script:
"""Update a script.
:param str script_id: The ID of the script to update. (required)
:param ScriptUpdateRequest update_request: Script updates to apply (required)
:return: The updated script.
"""
return self._invokable_scripts_service.patch_scripts_id(script_id=script_id,
script_update_request=update_request)
def delete_script(self, script_id: str) -> None:
"""Delete a script.
:param str script_id: The ID of the script to delete. (required)
:return: None
"""
self._invokable_scripts_service.delete_scripts_id(script_id=script_id)
def find_scripts(self, **kwargs):
"""List scripts.
:key int limit: The number of scripts to return.
:key int offset: The offset for pagination.
:return: List of scripts.
:rtype: list[Script]
"""
return self._invokable_scripts_service.get_scripts(**kwargs).scripts
def invoke_script(self, script_id: str, params: dict = None) -> TableList:
"""
Invoke a script synchronously and return the result as a TableList.
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
:param str script_id: The ID of the script to invoke. (required)
:param params: bind parameters
:return: :class:`~influxdb_client.client.flux_table.FluxTable` list wrapped into
:class:`~influxdb_client.client.flux_table.TableList`
:rtype: TableList
Serialize the query results to a flattened list of values via :func:`~influxdb_client.client.flux_table.TableList.to_values`:
.. code-block:: python
from influxdb_client import InfluxDBClient
with InfluxDBClient(url="https://us-west-2-1.aws.cloud2.influxdata.com", token="my-token", org="my-org") as client:
# Query: using Table structure
tables = client.invokable_scripts_api().invoke_script(script_id="script-id")
# Serialize to values
output = tables.to_values(columns=['location', '_time', '_value'])
print(output)
.. code-block:: python
[
['New York', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 24.3],
['Prague', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 25.3],
...
]
Serialize the query results to JSON via :func:`~influxdb_client.client.flux_table.TableList.to_json`:
.. code-block:: python
from influxdb_client import InfluxDBClient
with InfluxDBClient(url="https://us-west-2-1.aws.cloud2.influxdata.com", token="my-token", org="my-org") as client:
# Query: using Table structure
tables = client.invokable_scripts_api().invoke_script(script_id="script-id")
# Serialize to JSON
output = tables.to_json(indent=5)
print(output)
.. code-block:: javascript
[
{
"_measurement": "mem",
"_start": "2021-06-23T06:50:11.897825+00:00",
"_stop": "2021-06-25T06:50:11.897825+00:00",
"_time": "2020-02-27T16:20:00.897825+00:00",
"region": "north",
"_field": "usage",
"_value": 15
},
{
"_measurement": "mem",
"_start": "2021-06-23T06:50:11.897825+00:00",
"_stop": "2021-06-25T06:50:11.897825+00:00",
"_time": "2020-02-27T16:20:01.897825+00:00",
"region": "west",
"_field": "usage",
"_value": 10
},
...
]
""" # noqa: E501
response = self._invokable_scripts_service \
.post_scripts_id_invoke(script_id=script_id,
script_invocation_params=ScriptInvocationParams(params=params),
async_req=False,
_preload_content=False,
_return_http_data_only=False)
return self._to_tables(response, query_options=None, response_metadata_mode=FluxResponseMetadataMode.only_names)
def invoke_script_stream(self, script_id: str, params: dict = None) -> Generator['FluxRecord', Any, None]:
"""
Invoke a script synchronously and return the result as a Generator['FluxRecord'].
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
:param str script_id: The ID of the script to invoke. (required)
:param params: bind parameters
:return: Stream of FluxRecord.
:rtype: Generator['FluxRecord']
"""
response = self._invokable_scripts_service \
.post_scripts_id_invoke(script_id=script_id,
script_invocation_params=ScriptInvocationParams(params=params),
async_req=False,
_preload_content=False,
_return_http_data_only=False)
return self._to_flux_record_stream(response, query_options=None,
response_metadata_mode=FluxResponseMetadataMode.only_names)
def invoke_script_data_frame(self, script_id: str, params: dict = None, data_frame_index: List[str] = None):
"""
Invoke a script synchronously and return a Pandas DataFrame.
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
.. note:: If the ``script`` returns tables with differing schemas, the client generates a :class:`~DataFrame` for each of them.
:param str script_id: The ID of the script to invoke. (required)
:param List[str] data_frame_index: The list of columns that are used as DataFrame index.
:param params: bind parameters
:return: :class:`~DataFrame` or :class:`~List[DataFrame]`
.. warning:: For optimal processing of the query results use the ``pivot()`` function, which aligns results as a table.
.. code-block:: text
from(bucket:"my-bucket")
|> range(start: -5m, stop: now())
|> filter(fn: (r) => r._measurement == "mem")
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
For more info see:
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
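A minimal usage sketch (``script-id`` is a placeholder for an existing script, ideally one that pivots its results):
.. code-block:: python
    from influxdb_client import InfluxDBClient
    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        data_frame = client.invokable_scripts_api().invoke_script_data_frame(script_id="script-id")
        print(data_frame.to_string())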
""" # noqa: E501
_generator = self.invoke_script_data_frame_stream(script_id=script_id,
params=params,
data_frame_index=data_frame_index)
return self._to_data_frames(_generator)
def invoke_script_data_frame_stream(self, script_id: str, params: dict = None, data_frame_index: List[str] = None):
"""
Synchronously invoke a script and return a stream of Pandas DataFrames as a Generator['pd.DataFrame'].
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
.. note:: If the ``script`` returns tables with differing schemas, the client generates a :class:`~DataFrame` for each of them.
:param str script_id: The ID of the script to invoke. (required)
:param List[str] data_frame_index: The list of columns that are used as DataFrame index.
:param params: bind parameters
:return: :class:`~Generator[DataFrame]`
.. warning:: For optimal processing of the query results use the ``pivot()`` function, which aligns the results as a table.
.. code-block:: text
from(bucket:"my-bucket")
|> range(start: -5m, stop: now())
|> filter(fn: (r) => r._measurement == "mem")
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
For more info see:
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
""" # noqa: E501
response = self._invokable_scripts_service \
.post_scripts_id_invoke(script_id=script_id,
script_invocation_params=ScriptInvocationParams(params=params),
async_req=False,
_preload_content=False,
_return_http_data_only=False)
return self._to_data_frame_stream(data_frame_index, response, query_options=None,
response_metadata_mode=FluxResponseMetadataMode.only_names)
def invoke_script_csv(self, script_id: str, params: dict = None) -> CSVIterator:
"""
Synchronously invoke a script and return the result as a CSV iterator. Each iteration returns a row of the CSV file.
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
:param str script_id: The ID of the script to invoke. (required)
:param params: bind parameters
:return: :class:`~Iterator[List[str]]` wrapped into :class:`~influxdb_client.client.flux_table.CSVIterator`
:rtype: CSVIterator
Serializing the query results to a flattened list of values via :func:`~influxdb_client.client.flux_table.CSVIterator.to_values`:
.. code-block:: python
from influxdb_client import InfluxDBClient
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
# Query: using CSV iterator
csv_iterator = client.invokable_scripts_api().invoke_script_csv(script_id="script-id")
# Serialize to values
output = csv_iterator.to_values()
print(output)
.. code-block:: python
[
['', 'result', 'table', '_start', '_stop', '_time', '_value', '_field', '_measurement', 'location']
['', '', '0', '2022-06-16', '2022-06-16', '2022-06-16', '24.3', 'temperature', 'my_measurement', 'New York']
['', '', '1', '2022-06-16', '2022-06-16', '2022-06-16', '25.3', 'temperature', 'my_measurement', 'Prague']
...
]
""" # noqa: E501
response = self._invokable_scripts_service \
.post_scripts_id_invoke(script_id=script_id,
script_invocation_params=ScriptInvocationParams(params=params),
async_req=False,
_preload_content=False)
return self._to_csv(response)
def invoke_script_raw(self, script_id: str, params: dict = None) -> str:
"""
Synchronously invoke a script and return the raw, unprocessed result as a str.
The bind parameters referenced in the script are substituted with the `params` key-values sent in the request body.
:param str script_id: The ID of the script to invoke. (required)
:param params: bind parameters
:return: Result as a str.
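A minimal usage sketch (``script-id`` is a placeholder):
.. code-block:: python
    from influxdb_client import InfluxDBClient
    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        raw = client.invokable_scripts_api().invoke_script_raw(script_id="script-id")
        print(raw)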
"""
response = self._invokable_scripts_service \
.post_scripts_id_invoke(script_id=script_id,
script_invocation_params=ScriptInvocationParams(params=params),
async_req=False,
_preload_content=True)
return response

View File

@@ -0,0 +1,96 @@
"""Labels are a way to add visual metadata to dashboards, tasks, and other items in the InfluxDB UI."""
from typing import List, Dict, Union
from influxdb_client import LabelsService, LabelCreateRequest, Label, LabelUpdate
class LabelsApi(object):
"""Implementation for '/api/v2/labels' endpoint."""
def __init__(self, influxdb_client):
"""Initialize defaults."""
self._influxdb_client = influxdb_client
self._service = LabelsService(influxdb_client.api_client)
def create_label(self, name: str, org_id: str, properties: Dict[str, str] = None) -> Label:
"""
Create a new label.
:param name: label name
:param org_id: organization id
:param properties: optional label properties
:return: created label
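A short usage sketch (assuming ``labels_api`` was obtained via ``client.labels_api()``; the name, organization ID and properties are placeholders):
.. code-block:: python
    label = labels_api.create_label(name="my-label",
                                    org_id="my-org-id",
                                    properties={"color": "green"})
    print(label.id)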
"""
label_request = LabelCreateRequest(org_id=org_id, name=name, properties=properties)
return self._service.post_labels(label_create_request=label_request).label
def update_label(self, label: Label):
"""
Update an existing label name and properties.
:param label: label
:return: the updated label
"""
label_update = LabelUpdate()
label_update.properties = label.properties
label_update.name = label.name
return self._service.patch_labels_id(label_id=label.id, label_update=label_update).label
def delete_label(self, label: Union[str, Label]):
"""
Delete the label.
:param label: label id or Label
"""
label_id = None
if isinstance(label, str):
label_id = label
if isinstance(label, Label):
label_id = label.id
return self._service.delete_labels_id(label_id=label_id)
def clone_label(self, cloned_name: str, label: Label) -> Label:
"""
Create a new label as a copy of an existing label.
:param cloned_name: new label name
:param label: existing label
:return: cloned Label
"""
cloned_properties = None
if label.properties is not None:
cloned_properties = label.properties.copy()
return self.create_label(name=cloned_name, properties=cloned_properties, org_id=label.org_id)
def find_labels(self, **kwargs) -> List['Label']:
"""
Get all available labels.
:key str org_id: The organization ID.
:return: labels
"""
return self._service.get_labels(**kwargs).labels
def find_label_by_id(self, label_id: str):
"""
Retrieve the label by id.
:param label_id:
:return: Label
"""
return self._service.get_labels_id(label_id=label_id).label
def find_label_by_org(self, org_id) -> List['Label']:
"""
Get the list of all labels for given organization.
:param org_id: organization id
:return: list of labels
"""
return self._service.get_labels(org_id=org_id).labels

View File

@@ -0,0 +1,64 @@
"""Use the influxdb_client with python native logging."""
import logging
from influxdb_client import InfluxDBClient
class InfluxLoggingHandler(logging.Handler):
"""
InfluxLoggingHandler instances dispatch logging events to InfluxDB.
There is no need to set a Formatter.
The raw input will be passed on to the InfluxDB write API.
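A minimal wiring sketch (URL, token, org and bucket are placeholders; the logged message is expected to be InfluxDB line protocol):
.. code-block:: python
    import logging
    handler = InfluxLoggingHandler(url="http://localhost:8086", token="my-token",
                                   org="my-org", bucket="my-bucket")
    logger = logging.getLogger("influx_logger")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    logger.debug('logs,source=app message="started"')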
"""
DEFAULT_LOG_RECORD_KEYS = list(logging.makeLogRecord({}).__dict__.keys()) + ['message']
def __init__(self, *, url, token, org, bucket, client_args=None, write_api_args=None):
"""
Initialize defaults.
The arguments `client_args` and `write_api_args` can be dicts of kwargs.
They are passed on to the InfluxDBClient and write_api calls respectively.
"""
super().__init__()
self.bucket = bucket
client_args = {} if client_args is None else client_args
self.client = InfluxDBClient(url=url, token=token, org=org, **client_args)
write_api_args = {} if write_api_args is None else write_api_args
self.write_api = self.client.write_api(**write_api_args)
def __del__(self):
"""Make sure all resources are closed."""
self.close()
def close(self) -> None:
"""Close the write_api, client and logger."""
self.write_api.close()
self.client.close()
super().close()
def emit(self, record: logging.LogRecord) -> None:
"""Emit a record via the influxDB WriteApi."""
try:
message = self.format(record)
extra = self._get_extra_values(record)
return self.write_api.write(record=message, **extra)
except (KeyboardInterrupt, SystemExit):
raise
except (Exception,):
self.handleError(record)
def _get_extra_values(self, record: logging.LogRecord) -> dict:
"""
Extract all items from the record that were injected via extra.
Example: `logging.debug(msg, extra={key: value, ...})`.
"""
extra = {'bucket': self.bucket}
extra.update({key: value for key, value in record.__dict__.items()
if key not in self.DEFAULT_LOG_RECORD_KEYS})
return extra

View File

@@ -0,0 +1,60 @@
"""
An organization is a workspace for a group of users.
All dashboards, tasks, buckets, members, etc., belong to an organization.
"""
from influxdb_client import OrganizationsService, UsersService, Organization, PatchOrganizationRequest
class OrganizationsApi(object):
"""Implementation for '/api/v2/orgs' endpoint."""
def __init__(self, influxdb_client):
"""Initialize defaults."""
self._influxdb_client = influxdb_client
self._organizations_service = OrganizationsService(influxdb_client.api_client)
self._users_service = UsersService(influxdb_client.api_client)
def me(self):
"""Return the current authenticated user."""
user = self._users_service.get_me()
return user
def find_organization(self, org_id):
"""Retrieve an organization."""
return self._organizations_service.get_orgs_id(org_id=org_id)
def find_organizations(self, **kwargs):
"""
List all organizations.
:key int offset: Offset for pagination
:key int limit: Limit for pagination
:key bool descending:
:key str org: Filter organizations to a specific organization name.
:key str org_id: Filter organizations to a specific organization ID.
:key str user_id: Filter organizations to a specific user ID.
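A short usage sketch (assuming ``organizations_api`` was obtained via ``client.organizations_api()``):
.. code-block:: python
    orgs = organizations_api.find_organizations(org="my-org")
    for org in orgs:
        print(org.id, org.name)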
"""
return self._organizations_service.get_orgs(**kwargs).orgs
def create_organization(self, name: str = None, organization: Organization = None) -> Organization:
"""Create an organization."""
if organization is None:
organization = Organization(name=name)
return self._organizations_service.post_orgs(post_organization_request=organization)
def update_organization(self, organization: Organization) -> Organization:
"""Update an organization.
:param organization: Organization update to apply (required)
:return: Organization
"""
request = PatchOrganizationRequest(name=organization.name,
description=organization.description)
return self._organizations_service.patch_orgs_id(org_id=organization.id, patch_organization_request=request)
def delete_organization(self, org_id: str):
"""Delete an organization."""
return self._organizations_service.delete_orgs_id(org_id=org_id)

View File

@@ -0,0 +1,310 @@
"""
Querying InfluxDB by FluxLang.
Flux is InfluxData's functional data scripting language designed for querying, analyzing, and acting on data.
"""
from typing import List, Generator, Any, Callable
from influxdb_client import Dialect
from influxdb_client.client._base import _BaseQueryApi
from influxdb_client.client.flux_table import FluxRecord, TableList, CSVIterator
class QueryOptions(object):
"""Query options."""
def __init__(self, profilers: List[str] = None, profiler_callback: Callable = None) -> None:
"""
Initialize query options.
:param profilers: list of enabled flux profilers
:param profiler_callback: callback function that receives profiler results (FluxRecord)
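A short sketch of enabling Flux profilers (the callback name is illustrative):
.. code-block:: python
    def profiler_callback(record):
        print(f'Profiler: {record.values}')
    options = QueryOptions(profilers=["query", "operator"],
                           profiler_callback=profiler_callback)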
"""
self.profilers = profilers
self.profiler_callback = profiler_callback
class QueryApi(_BaseQueryApi):
"""Implementation for '/api/v2/query' endpoint."""
def __init__(self, influxdb_client, query_options=QueryOptions()):
"""
Initialize query client.
:param influxdb_client: influxdb client
"""
super().__init__(influxdb_client=influxdb_client, query_options=query_options)
def query_csv(self, query: str, org=None, dialect: Dialect = _BaseQueryApi.default_dialect, params: dict = None) \
-> CSVIterator:
"""
Execute the Flux query and return results as a CSV iterator. Each iteration returns a row of the CSV file.
:param query: a Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClient.org`` is used.
:param dialect: csv dialect format
:param params: bind parameters
:return: :class:`~Iterator[List[str]]` wrapped into :class:`~influxdb_client.client.flux_table.CSVIterator`
:rtype: CSVIterator
Serializing the query results to a flattened list of values via :func:`~influxdb_client.client.flux_table.CSVIterator.to_values`:
.. code-block:: python
from influxdb_client import InfluxDBClient
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
# Query: using CSV iterator
csv_iterator = client.query_api().query_csv('from(bucket:"my-bucket") |> range(start: -10m)')
# Serialize to values
output = csv_iterator.to_values()
print(output)
.. code-block:: python
[
['#datatype', 'string', 'long', 'dateTime:RFC3339', 'dateTime:RFC3339', 'dateTime:RFC3339', 'double', 'string', 'string', 'string']
['#group', 'false', 'false', 'true', 'true', 'false', 'false', 'true', 'true', 'true']
['#default', '_result', '', '', '', '', '', '', '', '']
['', 'result', 'table', '_start', '_stop', '_time', '_value', '_field', '_measurement', 'location']
['', '', '0', '2022-06-16', '2022-06-16', '2022-06-16', '24.3', 'temperature', 'my_measurement', 'New York']
['', '', '1', '2022-06-16', '2022-06-16', '2022-06-16', '25.3', 'temperature', 'my_measurement', 'Prague']
...
]
If you would like to turn off the `Annotated CSV headers <https://docs.influxdata.com/influxdb/latest/reference/syntax/annotated-csv/>`_ you can use the following code:
.. code-block:: python
from influxdb_client import InfluxDBClient, Dialect
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
# Query: using CSV iterator
csv_iterator = client.query_api().query_csv('from(bucket:"my-bucket") |> range(start: -10m)',
dialect=Dialect(header=False, annotations=[]))
for csv_line in csv_iterator:
print(csv_line)
.. code-block:: python
[
['', '_result', '0', '2022-06-16', '2022-06-16', '2022-06-16', '24.3', 'temperature', 'my_measurement', 'New York']
['', '_result', '1', '2022-06-16', '2022-06-16', '2022-06-16', '25.3', 'temperature', 'my_measurement', 'Prague']
...
]
""" # noqa: E501
org = self._org_param(org)
response = self._query_api.post_query(org=org, query=self._create_query(query, dialect, params),
async_req=False, _preload_content=False)
return self._to_csv(response)
def query_raw(self, query: str, org=None, dialect=_BaseQueryApi.default_dialect, params: dict = None):
"""
Execute a synchronous Flux query and return the raw, unprocessed result as a str.
:param query: a Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClient.org`` is used.
:param dialect: csv dialect format
:param params: bind parameters
:return: str
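A minimal sketch; note that depending on the client version the returned value may be the raw HTTP response rather than a decoded str:
.. code-block:: python
    from influxdb_client import InfluxDBClient
    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        raw = client.query_api().query_raw('from(bucket:"my-bucket") |> range(start: -10m)')
        print(raw)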
"""
org = self._org_param(org)
result = self._query_api.post_query(org=org, query=self._create_query(query, dialect, params), async_req=False,
_preload_content=False)
return result
def query(self, query: str, org=None, params: dict = None) -> TableList:
"""Execute synchronous Flux query and return result as a :class:`~influxdb_client.client.flux_table.FluxTable` list.
:param query: the Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClient.org`` is used.
:param params: bind parameters
:return: :class:`~influxdb_client.client.flux_table.FluxTable` list wrapped into
:class:`~influxdb_client.client.flux_table.TableList`
:rtype: TableList
Serializing the query results to a flattened list of values via :func:`~influxdb_client.client.flux_table.TableList.to_values`:
.. code-block:: python
from influxdb_client import InfluxDBClient
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
# Query: using Table structure
tables = client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
# Serialize to values
output = tables.to_values(columns=['location', '_time', '_value'])
print(output)
.. code-block:: python
[
['New York', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 24.3],
['Prague', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 25.3],
...
]
Serializing the query results to JSON via :func:`~influxdb_client.client.flux_table.TableList.to_json`:
.. code-block:: python
from influxdb_client import InfluxDBClient
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
# Query: using Table structure
tables = client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
# Serialize to JSON
output = tables.to_json(indent=5)
print(output)
.. code-block:: javascript
[
{
"_measurement": "mem",
"_start": "2021-06-23T06:50:11.897825+00:00",
"_stop": "2021-06-25T06:50:11.897825+00:00",
"_time": "2020-02-27T16:20:00.897825+00:00",
"region": "north",
"_field": "usage",
"_value": 15
},
{
"_measurement": "mem",
"_start": "2021-06-23T06:50:11.897825+00:00",
"_stop": "2021-06-25T06:50:11.897825+00:00",
"_time": "2020-02-27T16:20:01.897825+00:00",
"region": "west",
"_field": "usage",
"_value": 10
},
...
]
""" # noqa: E501
org = self._org_param(org)
response = self._query_api.post_query(org=org, query=self._create_query(query, self.default_dialect, params),
async_req=False, _preload_content=False, _return_http_data_only=False)
return self._to_tables(response, query_options=self._get_query_options())
def query_stream(self, query: str, org=None, params: dict = None) -> Generator['FluxRecord', Any, None]:
"""
Execute a synchronous Flux query and return a stream of FluxRecord as a Generator['FluxRecord'].
:param query: the Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClient.org`` is used.
:param params: bind parameters
:return: Generator['FluxRecord']
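A minimal usage sketch (the bucket name is a placeholder):
.. code-block:: python
    from influxdb_client import InfluxDBClient
    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        records = client.query_api().query_stream('from(bucket:"my-bucket") |> range(start: -10m)')
        for record in records:
            print(record.get_time(), record.get_value())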
"""
org = self._org_param(org)
response = self._query_api.post_query(org=org, query=self._create_query(query, self.default_dialect, params),
async_req=False, _preload_content=False, _return_http_data_only=False)
return self._to_flux_record_stream(response, query_options=self._get_query_options())
def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None,
use_extension_dtypes: bool = False):
"""
Execute a synchronous Flux query and return a Pandas DataFrame.
.. note:: If the ``query`` returns tables with differing schemas, the client generates a :class:`~DataFrame` for each of them.
:param query: the Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClient.org`` is used.
:param data_frame_index: the list of columns that are used as DataFrame index
:param params: bind parameters
:param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
Useful for queries with ``pivot`` function.
When data has missing values, column data type may change (to ``object`` or ``float64``).
Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
:return: :class:`~DataFrame` or :class:`~List[DataFrame]`
.. warning:: For optimal processing of the query results use the ``pivot()`` function, which aligns the results as a table.
.. code-block:: text
from(bucket:"my-bucket")
|> range(start: -5m, stop: now())
|> filter(fn: (r) => r._measurement == "mem")
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
For more info see:
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
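A minimal usage sketch (the bucket name is a placeholder):
.. code-block:: python
    from influxdb_client import InfluxDBClient
    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
        data_frame = client.query_api().query_data_frame(
            'from(bucket:"my-bucket") |> range(start: -10m) '
            '|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")')
        print(data_frame.to_string())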
""" # noqa: E501
_generator = self.query_data_frame_stream(query, org=org, data_frame_index=data_frame_index, params=params,
use_extension_dtypes=use_extension_dtypes)
return self._to_data_frames(_generator)
def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None,
use_extension_dtypes: bool = False):
"""
Execute a synchronous Flux query and return a stream of Pandas DataFrames as a :class:`~Generator[DataFrame]`.
.. note:: If the ``query`` returns tables with differing schemas, the client generates a :class:`~DataFrame` for each of them.
:param query: the Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClient.org`` is used.
:param data_frame_index: the list of columns that are used as DataFrame index
:param params: bind parameters
:param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
Useful for queries with ``pivot`` function.
When data has missing values, column data type may change (to ``object`` or ``float64``).
Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
:return: :class:`~Generator[DataFrame]`
.. warning:: For optimal processing of the query results use the ``pivot()`` function, which aligns the results as a table.
.. code-block:: text
from(bucket:"my-bucket")
|> range(start: -5m, stop: now())
|> filter(fn: (r) => r._measurement == "mem")
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
For more info see:
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
""" # noqa: E501
org = self._org_param(org)
response = self._query_api.post_query(org=org, query=self._create_query(query, self.default_dialect, params,
dataframe_query=True),
async_req=False, _preload_content=False, _return_http_data_only=False)
return self._to_data_frame_stream(data_frame_index=data_frame_index,
response=response,
query_options=self._get_query_options(),
use_extension_dtypes=use_extension_dtypes)
def __del__(self):
"""Close QueryAPI."""
pass

View File

@@ -0,0 +1,236 @@
"""
Querying InfluxDB by FluxLang.
Flux is InfluxData's functional data scripting language designed for querying, analyzing, and acting on data.
"""
from typing import List, AsyncGenerator
from influxdb_client.client._base import _BaseQueryApi
from influxdb_client.client.flux_table import FluxRecord, TableList
from influxdb_client.client.query_api import QueryOptions
from influxdb_client.rest import _UTF_8_encoding, ApiException
from .._async.rest import RESTResponseAsync
class QueryApiAsync(_BaseQueryApi):
"""Asynchronous implementation for '/api/v2/query' endpoint."""
def __init__(self, influxdb_client, query_options=QueryOptions()):
"""
Initialize query client.
:param influxdb_client: influxdb client
"""
super().__init__(influxdb_client=influxdb_client, query_options=query_options)
async def query(self, query: str, org=None, params: dict = None) -> TableList:
"""
Execute an asynchronous Flux query and return the result as a :class:`~influxdb_client.client.flux_table.FluxTable` list.
:param query: the Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClientAsync.org`` is used.
:param params: bind parameters
:return: :class:`~influxdb_client.client.flux_table.FluxTable` list wrapped into
:class:`~influxdb_client.client.flux_table.TableList`
:rtype: TableList
Serializing the query results to a flattened list of values via :func:`~influxdb_client.client.flux_table.TableList.to_values`:
.. code-block:: python
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
# Query: using Table structure
tables = await client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
# Serialize to values
output = tables.to_values(columns=['location', '_time', '_value'])
print(output)
.. code-block:: python
[
['New York', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 24.3],
['Prague', datetime.datetime(2022, 6, 7, 11, 3, 22, 917593, tzinfo=tzutc()), 25.3],
...
]
Serializing the query results to JSON via :func:`~influxdb_client.client.flux_table.TableList.to_json`:
.. code-block:: python
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
# Query: using Table structure
tables = await client.query_api().query('from(bucket:"my-bucket") |> range(start: -10m)')
# Serialize to JSON
output = tables.to_json(indent=5)
print(output)
.. code-block:: javascript
[
{
"_measurement": "mem",
"_start": "2021-06-23T06:50:11.897825+00:00",
"_stop": "2021-06-25T06:50:11.897825+00:00",
"_time": "2020-02-27T16:20:00.897825+00:00",
"region": "north",
"_field": "usage",
"_value": 15
},
{
"_measurement": "mem",
"_start": "2021-06-23T06:50:11.897825+00:00",
"_stop": "2021-06-25T06:50:11.897825+00:00",
"_time": "2020-02-27T16:20:01.897825+00:00",
"region": "west",
"_field": "usage",
"_value": 10
},
...
]
""" # noqa: E501
org = self._org_param(org)
response = await self._post_query(org=org, query=self._create_query(query, self.default_dialect, params))
return await self._to_tables_async(response, query_options=self._get_query_options())
async def query_stream(self, query: str, org=None, params: dict = None) -> AsyncGenerator['FluxRecord', None]:
"""
Execute an asynchronous Flux query and return a stream of :class:`~influxdb_client.client.flux_table.FluxRecord` as an AsyncGenerator[:class:`~influxdb_client.client.flux_table.FluxRecord`].
:param query: the Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClientAsync.org`` is used.
:param params: bind parameters
:return: AsyncGenerator[:class:`~influxdb_client.client.flux_table.FluxRecord`]
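A minimal sketch (run inside an event loop, e.g. via ``asyncio.run``; the bucket name is a placeholder):
.. code-block:: python
    import asyncio
    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
    async def main():
        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
            records = await client.query_api().query_stream('from(bucket:"my-bucket") |> range(start: -10m)')
            async for record in records:
                print(record)
    asyncio.run(main())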
""" # noqa: E501
org = self._org_param(org)
response = await self._post_query(org=org, query=self._create_query(query, self.default_dialect, params))
return await self._to_flux_record_stream_async(response, query_options=self._get_query_options())
async def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None,
use_extension_dtypes: bool = False):
"""
Execute an asynchronous Flux query and return a :class:`~pandas.core.frame.DataFrame`.
.. note:: If the ``query`` returns tables with differing schemas, the client generates a :class:`~DataFrame` for each of them.
:param query: the Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClientAsync.org`` is used.
:param data_frame_index: the list of columns that are used as DataFrame index
:param params: bind parameters
:param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
Useful for queries with ``pivot`` function.
When data has missing values, column data type may change (to ``object`` or ``float64``).
Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
:return: :class:`~DataFrame` or :class:`~List[DataFrame]`
.. warning:: For optimal processing of the query results use the ``pivot()`` function, which aligns the results as a table.
.. code-block:: text
from(bucket:"my-bucket")
|> range(start: -5m, stop: now())
|> filter(fn: (r) => r._measurement == "mem")
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
For more info see:
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
""" # noqa: E501
_generator = await self.query_data_frame_stream(query, org=org, data_frame_index=data_frame_index,
params=params, use_extension_dtypes=use_extension_dtypes)
dataframes = []
async for dataframe in _generator:
dataframes.append(dataframe)
return self._to_data_frames(dataframes)
async def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[str] = None,
params: dict = None, use_extension_dtypes: bool = False):
"""
Execute an asynchronous Flux query and return a stream of :class:`~pandas.core.frame.DataFrame` as an AsyncGenerator[:class:`~pandas.core.frame.DataFrame`].
.. note:: If the ``query`` returns tables with differing schemas, the client generates a :class:`~DataFrame` for each of them.
:param query: the Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClientAsync.org`` is used.
:param data_frame_index: the list of columns that are used as DataFrame index
:param params: bind parameters
:param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
Useful for queries with ``pivot`` function.
When data has missing values, column data type may change (to ``object`` or ``float64``).
Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
:return: AsyncGenerator[:class:`DataFrame`]
.. warning:: For optimal processing of the query results use the ``pivot()`` function, which aligns the results as a table.
.. code-block:: text
from(bucket:"my-bucket")
|> range(start: -5m, stop: now())
|> filter(fn: (r) => r._measurement == "mem")
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
For more info see:
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
""" # noqa: E501
org = self._org_param(org)
response = await self._post_query(org=org, query=self._create_query(query, self.default_dialect, params,
dataframe_query=True))
return await self._to_data_frame_stream_async(data_frame_index=data_frame_index, response=response,
query_options=self._get_query_options(),
use_extension_dtypes=use_extension_dtypes)
async def query_raw(self, query: str, org=None, dialect=_BaseQueryApi.default_dialect, params: dict = None):
"""
Execute an asynchronous Flux query and return the raw, unprocessed result as a str.
:param query: a Flux query
:param str, Organization org: specifies the organization for executing the query;
Take the ``ID``, ``Name`` or ``Organization``.
If not specified the default value from ``InfluxDBClientAsync.org`` is used.
:param dialect: csv dialect format
:param params: bind parameters
:return: :class:`~str`
"""
org = self._org_param(org)
result = await self._post_query(org=org, query=self._create_query(query, dialect, params))
raw_bytes = await result.read()
return raw_bytes.decode(_UTF_8_encoding)
async def _post_query(self, org, query):
response = await self._query_api.post_query_async(org=org,
query=query,
async_req=False,
_preload_content=False,
_return_http_data_only=True)
if not 200 <= response.status <= 299:
data = await response.read()
raise ApiException(http_resp=RESTResponseAsync(response, data))
return response

View File

@@ -0,0 +1,226 @@
"""
Process and analyze your data with tasks in the InfluxDB task engine.
Use tasks (scheduled Flux queries) to input a data stream and then analyze, modify, and act on the data accordingly.
"""
import datetime
from typing import List
from influxdb_client import TasksService, Task, TaskCreateRequest, TaskUpdateRequest, LabelResponse, LabelMapping, \
AddResourceMemberRequestBody, RunManually, Run, LogEvent
from influxdb_client.client._pages import _Paginated
class TasksApi(object):
"""Implementation for '/api/v2/tasks' endpoint."""
def __init__(self, influxdb_client):
"""Initialize defaults."""
self._influxdb_client = influxdb_client
self._service = TasksService(influxdb_client.api_client)
def find_task_by_id(self, task_id) -> Task:
"""Retrieve a task."""
task = self._service.get_tasks_id(task_id)
return task
def find_tasks(self, **kwargs):
"""List all tasks up to set limit (max 500).
:key str name: only returns tasks with the specified name
:key str after: returns tasks after specified ID
:key str user: filter tasks to a specific user ID
:key str org: filter tasks to a specific organization name
:key str org_id: filter tasks to a specific organization ID
:key int limit: the number of tasks to return
:return: Tasks
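A short usage sketch (assuming ``tasks_api`` was obtained via ``client.tasks_api()``; the organization name is a placeholder):
.. code-block:: python
    tasks = tasks_api.find_tasks(org="my-org", limit=100)
    for task in tasks:
        print(task.name, task.status)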
"""
return self._service.get_tasks(**kwargs).tasks
def find_tasks_iter(self, **kwargs):
"""Iterate over all tasks with pagination.
:key str name: only returns tasks with the specified name
:key str after: returns tasks after specified ID
:key str user: filter tasks to a specific user ID
:key str org: filter tasks to a specific organization name
:key str org_id: filter tasks to a specific organization ID
:key int limit: the number of tasks in one page
:return: Tasks iterator
"""
return _Paginated(self._service.get_tasks, lambda response: response.tasks).find_iter(**kwargs)
def create_task(self, task: Task = None, task_create_request: TaskCreateRequest = None) -> Task:
"""Create a new task."""
if task_create_request is not None:
return self._service.post_tasks(task_create_request)
if task is not None:
request = TaskCreateRequest(flux=task.flux, org_id=task.org_id, org=task.org, description=task.description,
status=task.status)
return self.create_task(task_create_request=request)
raise ValueError("task or task_create_request must be not None")
@staticmethod
def _create_task(name: str, flux: str, every, cron, org_id: str) -> Task:
task = Task(id=0, name=name, org_id=org_id, status="active", flux=flux)
repetition = ""
if every is not None:
repetition += "every: "
repetition += every
if cron is not None:
repetition += "cron: "
repetition += '"' + cron + '"'
flux_with_options = '{} \n\noption task = {{name: "{}", {}}}'.format(flux, name, repetition)
task.flux = flux_with_options
return task
def create_task_every(self, name, flux, every, organization) -> Task:
"""Create a new task with every repetition schedule."""
task = self._create_task(name, flux, every, None, organization.id)
return self.create_task(task)
def create_task_cron(self, name: str, flux: str, cron: str, org_id: str) -> Task:
"""Create a new task with cron repetition schedule."""
task = self._create_task(name=name, flux=flux, cron=cron, org_id=org_id, every=None)
return self.create_task(task)
def delete_task(self, task_id: str):
"""Delete a task."""
if task_id is not None:
return self._service.delete_tasks_id(task_id=task_id)
def update_task(self, task: Task) -> Task:
"""Update a task."""
req = TaskUpdateRequest(flux=task.flux, description=task.description, every=task.every, cron=task.cron,
status=task.status, offset=task.offset)
return self.update_task_request(task_id=task.id, task_update_request=req)
def update_task_request(self, task_id, task_update_request: TaskUpdateRequest) -> Task:
"""Update a task."""
return self._service.patch_tasks_id(task_id=task_id, task_update_request=task_update_request)
def clone_task(self, task: Task) -> Task:
"""Clone a task."""
cloned = Task(id=0, name=task.name, org_id=task.org_id, org=task.org, flux=task.flux, status="active")
created = self.create_task(cloned)
if task.id:
labels = self.get_labels(task.id)
for label in labels.labels:
self.add_label(label.id, created.id)
return created
def get_labels(self, task_id):
"""List all labels for a task."""
return self._service.get_tasks_id_labels(task_id=task_id)
def add_label(self, label_id: str, task_id: str) -> LabelResponse:
"""Add a label to a task."""
label_mapping = LabelMapping(label_id=label_id)
return self._service.post_tasks_id_labels(task_id=task_id, label_mapping=label_mapping)
def delete_label(self, label_id: str, task_id: str):
"""Delete a label from a task."""
return self._service.delete_tasks_id_labels_id(task_id=task_id, label_id=label_id)
def get_members(self, task_id: str):
"""List all task members."""
return self._service.get_tasks_id_members(task_id=task_id).users
def add_member(self, member_id, task_id):
"""Add a member to a task."""
user = AddResourceMemberRequestBody(id=member_id)
return self._service.post_tasks_id_members(task_id=task_id, add_resource_member_request_body=user)
def delete_member(self, member_id, task_id):
"""Remove a member from a task."""
return self._service.delete_tasks_id_members_id(user_id=member_id, task_id=task_id)
def get_owners(self, task_id):
"""List all owners of a task."""
return self._service.get_tasks_id_owners(task_id=task_id).users
def add_owner(self, owner_id, task_id):
"""Add an owner to a task."""
user = AddResourceMemberRequestBody(id=owner_id)
return self._service.post_tasks_id_owners(task_id=task_id, add_resource_member_request_body=user)
def delete_owner(self, owner_id, task_id):
"""Remove an owner from a task."""
return self._service.delete_tasks_id_owners_id(user_id=owner_id, task_id=task_id)
def get_runs(self, task_id, **kwargs) -> List['Run']:
"""
Retrieve list of run records for a task.
:param task_id: task id
:key str after: returns runs after specified ID
:key int limit: the number of runs to return
:key datetime after_time: filter runs to those scheduled after this time, RFC3339
:key datetime before_time: filter runs to those scheduled before this time, RFC3339
"""
return self._service.get_tasks_id_runs(task_id=task_id, **kwargs).runs
def get_run(self, task_id: str, run_id: str) -> Run:
"""
Get run record for specific task and run id.
:param task_id: task id
:param run_id: run id
:return: Run for specified task and run id
"""
return self._service.get_tasks_id_runs_id(task_id=task_id, run_id=run_id)
def get_run_logs(self, task_id: str, run_id: str) -> List['LogEvent']:
"""Retrieve all logs for a run."""
return self._service.get_tasks_id_runs_id_logs(task_id=task_id, run_id=run_id).events
def run_manually(self, task_id: str, scheduled_for: datetime = None):
"""
Manually start a run of the task now, overriding the current schedule.
:param task_id:
:param scheduled_for: planned execution
"""
r = RunManually(scheduled_for=scheduled_for)
return self._service.post_tasks_id_runs(task_id=task_id, run_manually=r)
def retry_run(self, task_id: str, run_id: str):
"""
Retry a task run.
:param task_id: task id
:param run_id: run id
"""
return self._service.post_tasks_id_runs_id_retry(task_id=task_id, run_id=run_id)
def cancel_run(self, task_id: str, run_id: str):
"""
Cancel a currently running run.
:param task_id:
:param run_id:
"""
return self._service.delete_tasks_id_runs_id(task_id=task_id, run_id=run_id)
def get_logs(self, task_id: str) -> List['LogEvent']:
"""
Retrieve all logs for a task.
:param task_id: task id
"""
return self._service.get_tasks_id_logs(task_id=task_id).events
def find_tasks_by_user(self, task_user_id):
"""List all tasks by user."""
return self.find_tasks(user=task_user_id)

View File

@@ -0,0 +1,80 @@
"""
Users are those with access to InfluxDB.
To grant a user permission to access data, add them as a member of an organization
and provide them with an authentication token.
"""
from typing import Union
from influxdb_client import UsersService, User, Users, UserResponse, PasswordResetBody
class UsersApi(object):
"""Implementation for '/api/v2/users' endpoint."""
def __init__(self, influxdb_client):
"""Initialize defaults."""
self._influxdb_client = influxdb_client
self._service = UsersService(influxdb_client.api_client)
def me(self) -> User:
"""Return the current authenticated user."""
user = self._service.get_me()
return user
def create_user(self, name: str) -> User:
"""Create a user."""
user = User(name=name)
return self._service.post_users(user=user)
def update_user(self, user: User) -> UserResponse:
"""Update a user.
:param user: User update to apply (required)
:return: User
"""
return self._service.patch_users_id(user_id=user.id, user=user)
def update_password(self, user: Union[str, User, UserResponse], password: str) -> None:
"""Update a password.
:param user: User to update password (required)
:param password: New password (required)
:return: None
"""
user_id = self._user_id(user)
return self._service.post_users_id_password(user_id=user_id, password_reset_body=PasswordResetBody(password))
def delete_user(self, user: Union[str, User, UserResponse]) -> None:
"""Delete a user.
:param user: user id or User
:return: None
"""
user_id = self._user_id(user)
return self._service.delete_users_id(user_id=user_id)
def find_users(self, **kwargs) -> Users:
"""List all users.
:key int offset: The offset for pagination. The number of records to skip.
:key int limit: Limits the number of records returned. Default is `20`.
:key str after: The last resource ID from which to seek from (but not including).
This is to be used instead of `offset`.
:key str name: The user name.
:key str id: The user ID.
:return: Users
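A short usage sketch (assuming ``users_api`` was obtained via ``client.users_api()``):
.. code-block:: python
    users = users_api.find_users(limit=20)
    for user in users.users:
        print(user.id, user.name)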
"""
return self._service.get_users(**kwargs)
def _user_id(self, user: Union[str, User, UserResponse]):
if isinstance(user, User):
user_id = user.id
elif isinstance(user, UserResponse):
user_id = user.id
else:
user_id = user
return user_id

View File

@@ -0,0 +1 @@
"""Utils package."""

View File

@@ -0,0 +1,101 @@
"""Utils to get right Date parsing function."""
import datetime
from sys import version_info
import threading
from datetime import timezone as tz
from dateutil import parser
date_helper = None
lock_ = threading.Lock()
class DateHelper:
"""
DateHelper groups different implementations of date operations.
If you would like to serialize the query results to a custom timezone, you can use the following code:
.. code-block:: python
from influxdb_client.client.util import date_utils
from influxdb_client.client.util.date_utils import DateHelper
import dateutil.parser
from dateutil import tz
def parse_date(date_string: str):
return dateutil.parser.parse(date_string).astimezone(tz.gettz('ETC/GMT+2'))
date_utils.date_helper = DateHelper()
date_utils.date_helper.parse_date = parse_date
"""
def __init__(self, timezone: datetime.tzinfo = tz.utc) -> None:
"""
Initialize defaults.
:param timezone: Default timezone used for serialization "datetime" without "tzinfo".
Default value is "UTC".
"""
self.timezone = timezone
def parse_date(self, date_string: str):
"""
Parse string into Date or Timestamp.
:return: Returns a :class:`datetime.datetime` object or compliant implementation
like :class:`class 'pandas._libs.tslibs.timestamps.Timestamp`
"""
pass
def to_nanoseconds(self, delta):
"""
Get number of nanoseconds in timedelta.
Solution comes from v1 client. Thx.
https://github.com/influxdata/influxdb-python/pull/811
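For example, a delta of 1 day, 1 second and 1 microsecond:
.. code-block:: python
    from datetime import timedelta
    delta = timedelta(days=1, seconds=1, microseconds=1)
    # 86400e9 + 1e9 + 1e3 = 86401000001000 nanoseconds
    DateHelper().to_nanoseconds(delta)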
"""
nanoseconds_in_days = delta.days * 86400 * 10 ** 9
nanoseconds_in_seconds = delta.seconds * 10 ** 9
nanoseconds_in_micros = delta.microseconds * 10 ** 3
return nanoseconds_in_days + nanoseconds_in_seconds + nanoseconds_in_micros
def to_utc(self, value: datetime):
"""
Convert datetime to UTC timezone.
:param value: datetime
:return: datetime in UTC
"""
if not value.tzinfo:
return self.to_utc(value.replace(tzinfo=self.timezone))
else:
return value.astimezone(tz.utc)
def get_date_helper() -> DateHelper:
"""
Return DateHelper with proper implementation.
If 'ciso8601' is available, use 'ciso8601.parse_datetime';
otherwise use 'datetime.fromisoformat' (Python >= 3.11) or 'dateutil.parse' (Python < 3.11).
"""
global date_helper
if date_helper is None:
with lock_:
# avoid duplicate initialization
if date_helper is None:
_date_helper = DateHelper()
try:
import ciso8601
_date_helper.parse_date = ciso8601.parse_datetime
except ModuleNotFoundError:
if (version_info.major, version_info.minor) >= (3, 11):
_date_helper.parse_date = datetime.datetime.fromisoformat
else:
_date_helper.parse_date = parser.parse
date_helper = _date_helper
return date_helper

View File

@@ -0,0 +1,15 @@
"""Pandas date utils."""
from influxdb_client.client.util.date_utils import DateHelper
from influxdb_client.extras import pd
class PandasDateTimeHelper(DateHelper):
"""DateHelper that use Pandas library with nanosecond precision."""
def parse_date(self, date_string: str):
"""Parse date string into `class 'pandas._libs.tslibs.timestamps.Timestamp`."""
return pd.to_datetime(date_string)
def to_nanoseconds(self, delta):
"""Get number of nanoseconds with nanos precision."""
return super().to_nanoseconds(delta) + (delta.nanoseconds if hasattr(delta, 'nanoseconds') else 0)

View File

@@ -0,0 +1,50 @@
"""Functions to share utility across client classes."""
from influxdb_client.rest import ApiException
def _is_id(value):
"""
Check if the value is a valid InfluxDB ID.
:param value: to check
:return: True if the provided parameter is a valid InfluxDB ID.
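For example:
.. code-block:: python
    _is_id("0123456789abcdef")  # True - 16 hexadecimal characters
    _is_id("not-an-id")         # False - wrong length, not hexadecimal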
"""
if value and len(value) == 16:
try:
int(value, 16)
return True
except ValueError:
return False
return False
def get_org_query_param(org, client, required_id=False):
"""
Get required type of Org query parameter.
:param str, Organization org: value provided as a parameter into API (optional)
:param InfluxDBClient client: with default value for Org parameter
:param bool required_id: true if the query param has to be an ID
:return: request type of org query parameter or None
"""
_org = client.org if org is None else org
if 'Organization' in type(_org).__name__:
_org = _org.id
if required_id and _org and not _is_id(_org):
try:
organizations = client.organizations_api().find_organizations(org=_org)
if len(organizations) < 1:
from influxdb_client.client.exceptions import InfluxDBError
message = f"The client cannot find organization with name: '{_org}' " \
"to determine their ID. Are you using token with sufficient permission?"
raise InfluxDBError(response=None, message=message)
return organizations[0].id
except ApiException as e:
if e.status == 404:
from influxdb_client.client.exceptions import InfluxDBError
message = f"The client cannot find organization with name: '{_org}' " \
"to determine their ID."
raise InfluxDBError(response=None, message=message)
raise e
return _org

View File

@@ -0,0 +1,205 @@
"""
Helper classes to make it easier to use the client in a multiprocessing environment.
For more information about how multiprocessing works, see Python's
`reference docs <https://docs.python.org/3/library/multiprocessing.html>`_.
"""
import logging
import multiprocessing
from influxdb_client import InfluxDBClient, WriteOptions
from influxdb_client.client.exceptions import InfluxDBError
logger = logging.getLogger('influxdb_client.client.util.multiprocessing_helper')
def _success_callback(conf: (str, str, str), data: str):
"""Successfully writen batch."""
logger.debug(f"Written batch: {conf}, data: {data}")
def _error_callback(conf: (str, str, str), data: str, exception: InfluxDBError):
"""Unsuccessfully writen batch."""
logger.debug(f"Cannot write batch: {conf}, data: {data} due: {exception}")
def _retry_callback(conf: (str, str, str), data: str, exception: InfluxDBError):
"""Retryable error."""
logger.debug(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
class _PoisonPill:
"""To notify process to terminate."""
pass
class MultiprocessingWriter(multiprocessing.Process):
"""
A helper class to write data into InfluxDB in an independent OS process.
Example:
.. code-block:: python
from influxdb_client import WriteOptions
from influxdb_client.client.util.multiprocessing_helper import MultiprocessingWriter
def main():
writer = MultiprocessingWriter(url="http://localhost:8086", token="my-token", org="my-org",
write_options=WriteOptions(batch_size=100))
writer.start()
for x in range(1, 1000):
writer.write(bucket="my-bucket", record=f"mem,tag=a value={x}i {x}")
writer.__del__()
if __name__ == '__main__':
main()
How to use with context_manager:
.. code-block:: python
from influxdb_client import WriteOptions
from influxdb_client.client.util.multiprocessing_helper import MultiprocessingWriter
def main():
with MultiprocessingWriter(url="http://localhost:8086", token="my-token", org="my-org",
write_options=WriteOptions(batch_size=100)) as writer:
for x in range(1, 1000):
writer.write(bucket="my-bucket", record=f"mem,tag=a value={x}i {x}")
if __name__ == '__main__':
main()
How to handle batch events:
.. code-block:: python
from influxdb_client import WriteOptions
from influxdb_client.client.exceptions import InfluxDBError
from influxdb_client.client.util.multiprocessing_helper import MultiprocessingWriter
class BatchingCallback(object):
def success(self, conf: (str, str, str), data: str):
print(f"Written batch: {conf}, data: {data}")
def error(self, conf: (str, str, str), data: str, exception: InfluxDBError):
print(f"Cannot write batch: {conf}, data: {data} due: {exception}")
def retry(self, conf: (str, str, str), data: str, exception: InfluxDBError):
print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
def main():
callback = BatchingCallback()
with MultiprocessingWriter(url="http://localhost:8086", token="my-token", org="my-org",
success_callback=callback.success,
error_callback=callback.error,
retry_callback=callback.retry) as writer:
for x in range(1, 1000):
writer.write(bucket="my-bucket", record=f"mem,tag=a value={x}i {x}")
if __name__ == '__main__':
main()
"""
__started__ = False
__disposed__ = False
def __init__(self, **kwargs) -> None:
"""
Initialize defaults.
For more information about how to initialize the writer, see the examples above.
:param kwargs: arguments are passed into ``__init__`` function of ``InfluxDBClient`` and ``write_api``.
"""
multiprocessing.Process.__init__(self)
self.kwargs = kwargs
self.client = None
self.write_api = None
self.queue_ = multiprocessing.Manager().Queue()
def write(self, **kwargs) -> None:
"""
Append time-series data into underlying queue.
For more information about how to pass arguments, see the examples above.
:param kwargs: arguments are passed into ``write`` function of ``WriteApi``
:return: None
"""
assert self.__disposed__ is False, 'Cannot write data: the writer is closed.'
assert self.__started__ is True, 'Cannot write data: the writer is not started.'
self.queue_.put(kwargs)
def run(self):
"""Initialize ``InfluxDBClient`` and waits for data to writes into InfluxDB."""
# Initialize Client and Write API
self.client = InfluxDBClient(**self.kwargs)
self.write_api = self.client.write_api(write_options=self.kwargs.get('write_options', WriteOptions()),
success_callback=self.kwargs.get('success_callback', _success_callback),
error_callback=self.kwargs.get('error_callback', _error_callback),
retry_callback=self.kwargs.get('retry_callback', _retry_callback))
# Infinite loop - until poison pill
while True:
next_record = self.queue_.get()
if type(next_record) is _PoisonPill:
# Poison pill means break the loop
self.terminate()
self.queue_.task_done()
break
self.write_api.write(**next_record)
self.queue_.task_done()
def start(self) -> None:
"""Start independent process for writing data into InfluxDB."""
super().start()
self.__started__ = True
def terminate(self) -> None:
"""
Cleanup resources in independent process.
This function **cannot be used** to terminate the ``MultiprocessingWriter``.
If you want to finish your writes, call ``__del__``.
"""
if self.write_api:
logger.info("flushing data...")
self.write_api.__del__()
self.write_api = None
if self.client:
self.client.__del__()
self.client = None
logger.info("closed")
def __enter__(self):
"""Enter the runtime context related to this object."""
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Exit the runtime context related to this object."""
self.__del__()
def __del__(self):
"""Dispose the client and write_api."""
if self.__started__:
self.queue_.put(_PoisonPill())
self.queue_.join()
self.join()
self.queue_ = None
self.__started__ = False
self.__disposed__ = True

View File

@@ -0,0 +1,52 @@
"""The warnings message definition."""
import warnings
class MissingPivotFunction(UserWarning):
"""User warning about missing pivot() function."""
@staticmethod
def print_warning(query: str):
"""Print warning about missing pivot() function and how to deal with that."""
if 'fieldsAsCols' in query or 'pivot' in query:
return
message = f"""The query doesn't contains the pivot() function.
The result will not be shaped to optimal processing by pandas.DataFrame. Use the pivot() function by:
{query} |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
You can disable this warning by:
import warnings
from influxdb_client.client.warnings import MissingPivotFunction
warnings.simplefilter("ignore", MissingPivotFunction)
For more info see:
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
"""
warnings.warn(message, MissingPivotFunction)
class CloudOnlyWarning(UserWarning):
"""User warning about availability only on the InfluxDB Cloud."""
@staticmethod
def print_warning(api_name: str, doc_url: str):
"""Print warning about availability only on the InfluxDB Cloud."""
message = f"""The '{api_name}' is available only on the InfluxDB Cloud.
For more info see:
- {doc_url}
- https://docs.influxdata.com/influxdb/cloud/
You can disable this warning by:
import warnings
from influxdb_client.client.warnings import CloudOnlyWarning
warnings.simplefilter("ignore", CloudOnlyWarning)
"""
warnings.warn(message, CloudOnlyWarning)

View File

@@ -0,0 +1,56 @@
# flake8: noqa
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import apis into api package
from influxdb_client.service.authorizations_service import AuthorizationsService
from influxdb_client.service.backup_service import BackupService
from influxdb_client.service.bucket_schemas_service import BucketSchemasService
from influxdb_client.service.buckets_service import BucketsService
from influxdb_client.service.cells_service import CellsService
from influxdb_client.service.checks_service import ChecksService
from influxdb_client.service.config_service import ConfigService
from influxdb_client.service.dbr_ps_service import DBRPsService
from influxdb_client.service.dashboards_service import DashboardsService
from influxdb_client.service.delete_service import DeleteService
from influxdb_client.service.health_service import HealthService
from influxdb_client.service.invokable_scripts_service import InvokableScriptsService
from influxdb_client.service.labels_service import LabelsService
from influxdb_client.service.legacy_authorizations_service import LegacyAuthorizationsService
from influxdb_client.service.metrics_service import MetricsService
from influxdb_client.service.notification_endpoints_service import NotificationEndpointsService
from influxdb_client.service.notification_rules_service import NotificationRulesService
from influxdb_client.service.organizations_service import OrganizationsService
from influxdb_client.service.ping_service import PingService
from influxdb_client.service.query_service import QueryService
from influxdb_client.service.ready_service import ReadyService
from influxdb_client.service.remote_connections_service import RemoteConnectionsService
from influxdb_client.service.replications_service import ReplicationsService
from influxdb_client.service.resources_service import ResourcesService
from influxdb_client.service.restore_service import RestoreService
from influxdb_client.service.routes_service import RoutesService
from influxdb_client.service.rules_service import RulesService
from influxdb_client.service.scraper_targets_service import ScraperTargetsService
from influxdb_client.service.secrets_service import SecretsService
from influxdb_client.service.setup_service import SetupService
from influxdb_client.service.signin_service import SigninService
from influxdb_client.service.signout_service import SignoutService
from influxdb_client.service.sources_service import SourcesService
from influxdb_client.service.tasks_service import TasksService
from influxdb_client.service.telegraf_plugins_service import TelegrafPluginsService
from influxdb_client.service.telegrafs_service import TelegrafsService
from influxdb_client.service.templates_service import TemplatesService
from influxdb_client.service.users_service import UsersService
from influxdb_client.service.variables_service import VariablesService
from influxdb_client.service.views_service import ViewsService
from influxdb_client.service.write_service import WriteService
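# Example (editor's sketch): each service wraps one group of REST endpoints and
# is constructed from an existing api_client; URL and token are placeholders.
if __name__ == '__main__':
    from influxdb_client import InfluxDBClient
    with InfluxDBClient(url="http://localhost:8086", token="my-token") as _client:
        _ping = PingService(_client.api_client)
        _ping.get_ping()  # performs GET /ping; returns None on success (HTTP 204)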

View File

@@ -0,0 +1,290 @@
"""
Functions for serializing Pandas DataFrames.
Much of the code here is inspired by the aioinflux package found here: https://github.com/gusutabopb/aioinflux
"""
import logging
import math
import re
from influxdb_client import WritePrecision
from influxdb_client.client.write.point import _ESCAPE_KEY, _ESCAPE_STRING, _ESCAPE_MEASUREMENT, DEFAULT_WRITE_PRECISION
logger = logging.getLogger('influxdb_client.client.write.dataframe_serializer')
def _itertuples(data_frame):
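    # Build one Series per column, then zip with the index so each row becomes
    # a tuple (index_value, col0_value, col1_value, ...) - a lightweight
    # stand-in for DataFrame.itertuples().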
cols = [data_frame.iloc[:, k] for k in range(len(data_frame.columns))]
return zip(data_frame.index, *cols)
class DataframeSerializer:
"""Serialize DataFrame into LineProtocols."""
def __init__(self, data_frame, point_settings, precision=DEFAULT_WRITE_PRECISION, chunk_size: int = None,
**kwargs) -> None:
"""
Init serializer.
:param data_frame: Pandas DataFrame to serialize
:param point_settings: Default Tags
:param precision: The precision for the unix timestamps within the body line-protocol.
:param chunk_size: The size of each chunk when serializing in chunks.
:key data_frame_measurement_name: name of measurement for writing Pandas DataFrame
:key data_frame_tag_columns: list of DataFrame columns which are tags, rest columns will be fields
:key data_frame_timestamp_column: name of DataFrame column which contains a timestamp. The column can be defined as a :class:`~str` value
formatted as `2018-10-26`, `2018-10-26 12:00`, `2018-10-26 12:00:00-05:00`
or other formats and types supported by `pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_ - ``DataFrame``
:key data_frame_timestamp_timezone: name of the timezone which is used for timestamp column - ``DataFrame``
""" # noqa: E501
# This function is hard to understand but for good reason:
# the approach used here is considerably more efficient
# than the alternatives.
#
# We build up a Python expression that efficiently converts a data point
# tuple into line-protocol entry, and then evaluate the expression
# as a lambda so that we can call it. This avoids the overhead of
# invoking a function on every data value - we only have one function
# call per row instead. The expression consists of exactly
# one f-string, so we build up the parts of it as segments
# that are concatenated together to make the full f-string inside
# the lambda.
#
# Things are made a little more complex because fields and tags with NaN
# values and empty tags are omitted from the generated line-protocol
# output.
#
# As an example, say we have a data frame with two value columns:
# a float
# b int
#
# This will generate a lambda expression to be evaluated that looks like
# this:
#
# lambda p: f"""{measurement_name} {keys[0]}={p[1]},{keys[1]}={p[2]}i {p[0].value}"""
#
# This lambda is then executed for each row p.
#
# When NaNs are present, the expression looks like this (split
# across two lines to satisfy the code-style checker)
#
# lambda p: f"""{measurement_name} {"" if pd.isna(p[1])
# else f"{keys[0]}={p[1]}"},{keys[1]}={p[2]}i {p[0].value}"""
#
# When there's a NaN value in column a, we'll end up with a comma at the start of the
# fields, so we run a regexp substitution after generating the line-protocol entries
# to remove this.
#
# We're careful to run these potentially costly extra steps only when NaN values actually
# exist in the data.
from ...extras import pd, np
if not isinstance(data_frame, pd.DataFrame):
raise TypeError('Must be DataFrame, but type was: {0}.'
.format(type(data_frame)))
data_frame_measurement_name = kwargs.get('data_frame_measurement_name')
if data_frame_measurement_name is None:
raise TypeError('"data_frame_measurement_name" is a Required Argument')
timestamp_column = kwargs.get('data_frame_timestamp_column', None)
timestamp_timezone = kwargs.get('data_frame_timestamp_timezone', None)
data_frame = data_frame.copy(deep=False)
data_frame_timestamp = data_frame.index if timestamp_column is None else data_frame[timestamp_column]
if isinstance(data_frame_timestamp, pd.PeriodIndex):
data_frame_timestamp = data_frame_timestamp.to_timestamp()
else:
# TODO: this is almost certainly not what you want
# when the index is the default RangeIndex.
# Instead, it would probably be better to leave
# out the timestamp unless a time column is explicitly
# enabled.
data_frame_timestamp = pd.to_datetime(data_frame_timestamp, unit=precision)
if timestamp_timezone:
if isinstance(data_frame_timestamp, pd.DatetimeIndex):
data_frame_timestamp = data_frame_timestamp.tz_localize(timestamp_timezone)
else:
data_frame_timestamp = data_frame_timestamp.dt.tz_localize(timestamp_timezone)
if hasattr(data_frame_timestamp, 'tzinfo') and data_frame_timestamp.tzinfo is None:
data_frame_timestamp = data_frame_timestamp.tz_localize('UTC')
if timestamp_column is None:
data_frame.index = data_frame_timestamp
else:
data_frame[timestamp_column] = data_frame_timestamp
data_frame_tag_columns = kwargs.get('data_frame_tag_columns')
data_frame_tag_columns = set(data_frame_tag_columns or [])
# keys holds a list of string keys.
keys = []
# tags holds a list of tag f-string segments ordered alphabetically by tag key.
tags = []
# fields holds a list of field f-string segments ordered alphabetically by field key.
fields = []
# field_indexes holds the index into each row of all the fields.
field_indexes = []
if point_settings.defaultTags:
for key, value in point_settings.defaultTags.items():
# Avoid overwriting existing data if there's a column
# that already exists with the default tag's name.
# Note: when a new column is added, the old DataFrame
# that we've made a shallow copy of is unaffected.
# TODO: when there are NaN or empty values in
# the column, we could make a deep copy of the
# data and fill in those values with the default tag value.
if key not in data_frame.columns:
data_frame[key] = value
data_frame_tag_columns.add(key)
# Get a list of all the columns sorted by field/tag key.
# We want to iterate through the columns in sorted order
# so that we know when we're on the first field so we
# can know whether a comma is needed for that
# field.
columns = sorted(enumerate(data_frame.dtypes.items()), key=lambda col: col[1][0])
# null_columns has a bool value for each column holding
# whether that column contains any null (NaN or None) values.
null_columns = data_frame.isnull().any()
timestamp_index = 0
# Iterate through the columns building up the expression for each column.
for index, (key, value) in columns:
key = str(key)
key_format = f'{{keys[{len(keys)}]}}'
keys.append(key.translate(_ESCAPE_KEY))
# The field index is one more than the column index because the
# time index is at column zero in the finally zipped-together
# result columns.
field_index = index + 1
val_format = f'p[{field_index}]'
if key in data_frame_tag_columns:
# This column is a tag column.
if null_columns.iloc[index]:
key_value = f"""{{
'' if {val_format} == '' or pd.isna({val_format}) else
f',{key_format}={{str({val_format}).translate(_ESCAPE_KEY)}}'
}}"""
else:
key_value = f',{key_format}={{str({val_format}).translate(_ESCAPE_KEY)}}'
tags.append(key_value)
continue
elif timestamp_column is not None and key in timestamp_column:
timestamp_index = field_index
continue
# This column is a field column.
# Note: no comma separator is needed for the first field.
# It's important to omit it because when the first
# field column has no nulls, we don't run the comma-removal
# regexp substitution step.
sep = '' if len(field_indexes) == 0 else ','
if issubclass(value.type, np.integer) or issubclass(value.type, np.floating) or issubclass(value.type, np.bool_): # noqa: E501
suffix = 'i' if issubclass(value.type, np.integer) else ''
if null_columns.iloc[index]:
field_value = f"""{{"" if pd.isna({val_format}) else f"{sep}{key_format}={{{val_format}}}{suffix}"}}""" # noqa: E501
else:
field_value = f"{sep}{key_format}={{{val_format}}}{suffix}"
else:
if null_columns.iloc[index]:
field_value = f"""{{
'' if pd.isna({val_format}) else
f'{sep}{key_format}="{{str({val_format}).translate(_ESCAPE_STRING)}}"'
}}"""
else:
field_value = f'''{sep}{key_format}="{{str({val_format}).translate(_ESCAPE_STRING)}}"'''
field_indexes.append(field_index)
fields.append(field_value)
measurement_name = str(data_frame_measurement_name).translate(_ESCAPE_MEASUREMENT)
tags = ''.join(tags)
fields = ''.join(fields)
timestamp = '{p[%s].value}' % timestamp_index
if precision == WritePrecision.US:
timestamp = '{int(p[%s].value / 1e3)}' % timestamp_index
elif precision == WritePrecision.MS:
timestamp = '{int(p[%s].value / 1e6)}' % timestamp_index
elif precision == WritePrecision.S:
timestamp = '{int(p[%s].value / 1e9)}' % timestamp_index
f = eval(f'lambda p: f"""{{measurement_name}}{tags} {fields} {timestamp}"""', {
'measurement_name': measurement_name,
'_ESCAPE_KEY': _ESCAPE_KEY,
'_ESCAPE_STRING': _ESCAPE_STRING,
'keys': keys,
'pd': pd,
})
for k, v in dict(data_frame.dtypes).items():
if k in data_frame_tag_columns:
data_frame = data_frame.replace({k: ''}, np.nan)
def _any_not_nan(p, indexes):
return any(map(lambda x: not pd.isna(p[x]), indexes))
self.data_frame = data_frame
self.f = f
self.field_indexes = field_indexes
self.first_field_maybe_null = null_columns.iloc[field_indexes[0] - 1]
self._any_not_nan = _any_not_nan
#
# prepare chunks
#
if chunk_size is not None:
self.number_of_chunks = int(math.ceil(len(data_frame) / float(chunk_size)))
self.chunk_size = chunk_size
else:
self.number_of_chunks = None
def serialize(self, chunk_idx: int = None):
"""
Serialize chunk into LineProtocols.
:param chunk_idx: The index of the chunk to serialize. If `None`, the whole DataFrame is serialized.
"""
if chunk_idx is None:
chunk = self.data_frame
else:
logger.debug("Serialize chunk %s/%s ...", chunk_idx + 1, self.number_of_chunks)
chunk = self.data_frame[chunk_idx * self.chunk_size:(chunk_idx + 1) * self.chunk_size]
if self.first_field_maybe_null:
# When the first field is null (None/NaN), we'll have
# a spurious leading comma which needs to be removed.
lp = (re.sub('^(( |[^ ])* ),([a-zA-Z0-9])(.*)', '\\1\\3\\4', self.f(p))
for p in filter(lambda x: self._any_not_nan(x, self.field_indexes), _itertuples(chunk)))
return list(lp)
else:
return list(map(self.f, _itertuples(chunk)))
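    # NOTE (editor): the method below is shadowed by the instance attribute
    # `self.number_of_chunks` assigned in __init__, so normal attribute access
    # returns the stored value instead of calling this method.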
def number_of_chunks(self):
"""
Return the number of chunks.
:return: number of chunks or None if chunk_size is not specified.
"""
return self.number_of_chunks
def data_frame_to_list_of_points(data_frame, point_settings, precision=DEFAULT_WRITE_PRECISION, **kwargs):
"""
Serialize DataFrame into LineProtocols.
:param data_frame: Pandas DataFrame to serialize
:param point_settings: Default Tags
:param precision: The precision for the unix timestamps within the body line-protocol.
:key data_frame_measurement_name: name of measurement for writing Pandas DataFrame
:key data_frame_tag_columns: list of DataFrame columns which are tags, rest columns will be fields
:key data_frame_timestamp_column: name of DataFrame column which contains a timestamp. The column can be defined as a :class:`~str` value
formatted as `2018-10-26`, `2018-10-26 12:00`, `2018-10-26 12:00:00-05:00`
or other formats and types supported by `pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_ - ``DataFrame``
:key data_frame_timestamp_timezone: name of the timezone which is used for timestamp column - ``DataFrame``
""" # noqa: E501
return DataframeSerializer(data_frame, point_settings, precision, **kwargs).serialize()
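# Example (editor's sketch): end-to-end serialization of a tiny DataFrame;
# the measurement and column names are illustrative. Requires pandas.
if __name__ == '__main__':
    import pandas as pd
    from influxdb_client.client.write_api import PointSettings

    _df = pd.DataFrame(
        data=[["coyote_creek", 1.0], ["coyote_creek", 2.0]],
        index=pd.to_datetime(["2020-04-05 00:00", "2020-04-05 01:00"]),
        columns=["location", "water_level"])
    _lines = data_frame_to_list_of_points(
        _df, PointSettings(),
        data_frame_measurement_name="h2o_feet",
        data_frame_tag_columns=["location"])
    # e.g. ['h2o_feet,location=coyote_creek water_level=1.0 1586044800000000000', ...]
    print(_lines)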

View File

@@ -0,0 +1,371 @@
"""Point data structure to represent LineProtocol."""
import math
import warnings
from builtins import int
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from numbers import Integral
from influxdb_client.client.util.date_utils import get_date_helper
from influxdb_client.domain.write_precision import WritePrecision
EPOCH = datetime.fromtimestamp(0, tz=timezone.utc)
DEFAULT_WRITE_PRECISION = WritePrecision.NS
_ESCAPE_MEASUREMENT = str.maketrans({
',': r'\,',
' ': r'\ ',
'\n': r'\n',
'\t': r'\t',
'\r': r'\r',
})
_ESCAPE_KEY = str.maketrans({
',': r'\,',
'=': r'\=',
' ': r'\ ',
'\n': r'\n',
'\t': r'\t',
'\r': r'\r',
})
_ESCAPE_STRING = str.maketrans({
'"': r'\"',
'\\': r'\\',
})
try:
import numpy as np
_HAS_NUMPY = True
except ModuleNotFoundError:
_HAS_NUMPY = False
class Point(object):
"""
Point defines the values that will be written to the database.
Ref: https://docs.influxdata.com/influxdb/latest/reference/key-concepts/data-elements/#point
"""
@staticmethod
def measurement(measurement):
"""Create a new Point with specified measurement name."""
p = Point(measurement)
return p
@staticmethod
def from_dict(dictionary: dict, write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, **kwargs):
"""
Initialize point from 'dict' structure.
The expected dict structure is:
- measurement
- tags
- fields
- time
Example:
.. code-block:: python
# Use default dictionary structure
dict_structure = {
"measurement": "h2o_feet",
"tags": {"location": "coyote_creek"},
"fields": {"water_level": 1.0},
"time": 1
}
point = Point.from_dict(dict_structure, WritePrecision.NS)
Example:
.. code-block:: python
# Use custom dictionary structure
dictionary = {
"name": "sensor_pt859",
"location": "warehouse_125",
"version": "2021.06.05.5874",
"pressure": 125,
"temperature": 10,
"created": 1632208639,
}
point = Point.from_dict(dictionary,
write_precision=WritePrecision.S,
record_measurement_key="name",
record_time_key="created",
record_tag_keys=["location", "version"],
record_field_keys=["pressure", "temperature"])
Int Types:
The following example shows how to configure the types of integers fields.
It is useful when you want to serialize integers always as ``float`` to avoid ``field type conflict``
or use ``unsigned 64-bit integer`` as the type for serialization.
.. code-block:: python
# Use custom dictionary structure
dict_structure = {
"measurement": "h2o_feet",
"tags": {"location": "coyote_creek"},
"fields": {
"water_level": 1.0,
"some_counter": 108913123234
},
"time": 1
}
point = Point.from_dict(dict_structure, field_types={"some_counter": "uint"})
:param dictionary: dictionary for serialize into data Point
:param write_precision: sets the precision for the supplied time values
:key record_measurement_key: key of dictionary with specified measurement
:key record_measurement_name: static measurement name for data Point
:key record_time_key: key of dictionary with specified timestamp
:key record_tag_keys: list of dictionary keys to use as a tag
:key record_field_keys: list of dictionary keys to use as a field
:key field_types: optional dictionary to specify types of serialized fields. Currently, customization is supported for integer types.
Possible integers types:
- ``int`` - serialize integers as "**Signed 64-bit integers**" - ``9223372036854775807i`` (default behaviour)
- ``uint`` - serialize integers as "**Unsigned 64-bit integers**" - ``9223372036854775807u``
- ``float`` - serialize integers as "**IEEE-754 64-bit floating-point numbers**". Useful for unifying number types in your pipeline to avoid a field type conflict - ``9223372036854775807``
The ``field_types`` can also be specified as part of the incoming dictionary. For more info see the example above.
:return: new data point
""" # noqa: E501
measurement_ = kwargs.get('record_measurement_name', None)
if measurement_ is None:
measurement_ = dictionary[kwargs.get('record_measurement_key', 'measurement')]
point = Point(measurement_)
record_tag_keys = kwargs.get('record_tag_keys', None)
if record_tag_keys is not None:
for tag_key in record_tag_keys:
if tag_key in dictionary:
point.tag(tag_key, dictionary[tag_key])
elif 'tags' in dictionary:
for tag_key, tag_value in dictionary['tags'].items():
point.tag(tag_key, tag_value)
record_field_keys = kwargs.get('record_field_keys', None)
if record_field_keys is not None:
for field_key in record_field_keys:
if field_key in dictionary:
point.field(field_key, dictionary[field_key])
else:
for field_key, field_value in dictionary['fields'].items():
point.field(field_key, field_value)
record_time_key = kwargs.get('record_time_key', 'time')
if record_time_key in dictionary:
point.time(dictionary[record_time_key], write_precision=write_precision)
_field_types = kwargs.get('field_types', {})
if 'field_types' in dictionary:
_field_types = dictionary['field_types']
# Map API fields types to Line Protocol types postfix:
# - int: 'i'
# - uint: 'u'
# - float: ''
point._field_types = dict(map(
lambda item: (item[0], 'i' if item[1] == 'int' else 'u' if item[1] == 'uint' else ''),
_field_types.items()
))
return point
def __init__(self, measurement_name):
"""Initialize defaults."""
self._tags = {}
self._fields = {}
self._name = measurement_name
self._time = None
self._write_precision = DEFAULT_WRITE_PRECISION
self._field_types = {}
def time(self, time, write_precision=DEFAULT_WRITE_PRECISION):
"""
Specify timestamp for DataPoint with declared precision.
If the time doesn't have a specified timezone, it is assumed to be UTC.
Examples::
Point.measurement("h2o").field("val", 1).time("2009-11-10T23:00:00.123456Z")
Point.measurement("h2o").field("val", 1).time(1257894000123456000)
Point.measurement("h2o").field("val", 1).time(datetime(2009, 11, 10, 23, 0, 0, 123456))
Point.measurement("h2o").field("val", 1).time(1257894000123456000, write_precision=WritePrecision.NS)
:param time: the timestamp for your data
:param write_precision: sets the precision for the supplied time values
:return: this point
"""
self._write_precision = write_precision
self._time = time
return self
def tag(self, key, value):
"""Add tag with key and value."""
self._tags[key] = value
return self
def field(self, field, value):
"""Add field with key and value."""
self._fields[field] = value
return self
def to_line_protocol(self, precision=None):
"""
Create LineProtocol.
:param precision: required precision of LineProtocol. If not set, the precision from the ``Point`` is used.
"""
_measurement = _escape_key(self._name, _ESCAPE_MEASUREMENT)
if _measurement.startswith("#"):
message = f"""The measurement name '{_measurement}' start with '#'.
The output Line protocol will be interpret as a comment by InfluxDB. For more info see:
- https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#comments
"""
warnings.warn(message, SyntaxWarning)
_tags = _append_tags(self._tags)
_fields = _append_fields(self._fields, self._field_types)
if not _fields:
return ""
_time = _append_time(self._time, self._write_precision if precision is None else precision)
return f"{_measurement}{_tags}{_fields}{_time}"
@property
def write_precision(self):
"""Get precision."""
return self._write_precision
@classmethod
def set_str_rep(cls, rep_function):
"""Set the string representation for all Points."""
cls.__str___rep = rep_function
def __str__(self):
"""Create string representation of this Point."""
return self.to_line_protocol()
def __eq__(self, other):
"""Return true iff other is equal to self."""
if not isinstance(other, Point):
return False
# assume points are equal iff their instance fields are equal
return (self._tags == other._tags and
self._fields == other._fields and
self._name == other._name and
self._time == other._time and
self._write_precision == other._write_precision and
self._field_types == other._field_types)
def _append_tags(tags):
_return = []
for tag_key, tag_value in sorted(tags.items()):
if tag_value is None:
continue
tag = _escape_key(tag_key)
value = _escape_tag_value(tag_value)
if tag != '' and value != '':
_return.append(f'{tag}={value}')
return f"{',' if _return else ''}{','.join(_return)} "
def _append_fields(fields, field_types):
_return = []
for field, value in sorted(fields.items()):
if value is None:
continue
if isinstance(value, float) or isinstance(value, Decimal) or _np_is_subtype(value, 'float'):
if not math.isfinite(value):
continue
s = str(value)
# It's common to represent whole numbers as floats
# and the trailing ".0" that Python produces is unnecessary
# in line-protocol, inconsistent with other line-protocol encoders,
# and takes more space than needed, so trim it off.
if s.endswith('.0'):
s = s[:-2]
_return.append(f'{_escape_key(field)}={s}')
elif (isinstance(value, int) or _np_is_subtype(value, 'int')) and not isinstance(value, bool):
_type = field_types.get(field, "i")
_return.append(f'{_escape_key(field)}={str(value)}{_type}')
elif isinstance(value, bool):
_return.append(f'{_escape_key(field)}={str(value).lower()}')
elif isinstance(value, str):
_return.append(f'{_escape_key(field)}="{_escape_string(value)}"')
else:
raise ValueError(f'Type: "{type(value)}" of field: "{field}" is not supported.')
return f"{','.join(_return)}"
def _append_time(time, write_precision) -> str:
if time is None:
return ''
return f" {int(_convert_timestamp(time, write_precision))}"
def _escape_key(tag, escape_list=None) -> str:
if escape_list is None:
escape_list = _ESCAPE_KEY
return str(tag).translate(escape_list)
def _escape_tag_value(value) -> str:
ret = _escape_key(value)
if ret.endswith('\\'):
ret += ' '
return ret
def _escape_string(value) -> str:
return str(value).translate(_ESCAPE_STRING)
def _convert_timestamp(timestamp, precision=DEFAULT_WRITE_PRECISION):
date_helper = get_date_helper()
if isinstance(timestamp, Integral):
return timestamp # assume precision is correct if timestamp is int
if isinstance(timestamp, str):
timestamp = date_helper.parse_date(timestamp)
if isinstance(timestamp, timedelta) or isinstance(timestamp, datetime):
if isinstance(timestamp, datetime):
timestamp = date_helper.to_utc(timestamp) - EPOCH
ns = date_helper.to_nanoseconds(timestamp)
if precision is None or precision == WritePrecision.NS:
return ns
elif precision == WritePrecision.US:
return ns / 1e3
elif precision == WritePrecision.MS:
return ns / 1e6
elif precision == WritePrecision.S:
return ns / 1e9
raise ValueError(timestamp)
def _np_is_subtype(value, np_type):
if not _HAS_NUMPY or not hasattr(value, 'dtype'):
return False
if np_type == 'float':
return np.issubdtype(value, np.floating)
elif np_type == 'int':
return np.issubdtype(value, np.integer)
return False
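# Example (editor's sketch): composing a Point and rendering its line protocol;
# the names and values below are illustrative only.
if __name__ == '__main__':
    _point = Point("h2o_feet") \
        .tag("location", "coyote creek") \
        .field("water_level", 4.9) \
        .time(1257894000123456000, write_precision=WritePrecision.NS)
    # Spaces in tag values are escaped, producing:
    # h2o_feet,location=coyote\ creek water_level=4.9 1257894000123456000
    print(_point.to_line_protocol())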

View File

@@ -0,0 +1,148 @@
"""Implementation for Retry strategy during HTTP requests."""
import logging
from datetime import datetime, timedelta
from itertools import takewhile
from random import random
from typing import Callable
from urllib3 import Retry
from urllib3.exceptions import MaxRetryError, ResponseError
from influxdb_client.client.exceptions import InfluxDBError
logger = logging.getLogger('influxdb_client.client.write.retry')
class WritesRetry(Retry):
"""
Writes retry configuration.
The next delay is computed as a random value in the range
`retry_interval * exponential_base^(attempts-1)` to `retry_interval * exponential_base^(attempts)`.
Example:
for retry_interval=5, exponential_base=2, max_retry_delay=125, total=5
retry delays are randomly distributed values within the ranges of
[5-10, 10-20, 20-40, 40-80, 80-125]
"""
def __init__(self, jitter_interval=0, max_retry_delay=125, exponential_base=2, max_retry_time=180, total=5,
retry_interval=5, retry_callback: Callable[[Exception], int] = None, **kw):
"""
Initialize defaults.
:param int jitter_interval: random milliseconds when retrying writes
:param num max_retry_delay: maximum delay when retrying write in seconds
:param int max_retry_time: maximum total retry timeout in seconds,
an attempt after this timeout raises MaxRetryError
:param int total: maximum number of retries
:param num retry_interval: initial retry delay range in seconds
:param int exponential_base: base for the exponential retry delay
:param Callable[[Exception], int] retry_callback: the callable ``callback`` to run after a retryable
error occurred.
The callable must accept one argument:
- `Exception`: a retryable error
"""
super().__init__(**kw)
self.jitter_interval = jitter_interval
self.total = total
self.retry_interval = retry_interval
self.max_retry_delay = max_retry_delay
self.max_retry_time = max_retry_time
self.exponential_base = exponential_base
self.retry_timeout = datetime.now() + timedelta(seconds=max_retry_time)
self.retry_callback = retry_callback
def new(self, **kw):
"""Initialize defaults."""
if 'jitter_interval' not in kw:
kw['jitter_interval'] = self.jitter_interval
if 'retry_interval' not in kw:
kw['retry_interval'] = self.retry_interval
if 'max_retry_delay' not in kw:
kw['max_retry_delay'] = self.max_retry_delay
if 'max_retry_time' not in kw:
kw['max_retry_time'] = self.max_retry_time
if 'exponential_base' not in kw:
kw['exponential_base'] = self.exponential_base
if 'retry_callback' not in kw:
kw['retry_callback'] = self.retry_callback
new = super().new(**kw)
new.retry_timeout = self.retry_timeout
return new
def is_retry(self, method, status_code, has_retry_after=False):
"""is_retry doesn't require retry_after header. If there is not Retry-After we will use backoff."""
if not self._is_method_retryable(method):
return False
return self.total and (status_code >= 429)
def get_backoff_time(self):
"""Variant of exponential backoff with initial and max delay and a random jitter delay."""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(
list(
takewhile(lambda x: x.redirect_location is None, reversed(self.history))
)
)
# First fail doesn't increase backoff
consecutive_errors_len -= 1
if consecutive_errors_len < 0:
return 0
range_start = self.retry_interval
range_stop = self.retry_interval * self.exponential_base
i = 1
while i <= consecutive_errors_len:
i += 1
range_start = range_stop
range_stop = range_stop * self.exponential_base
if range_stop > self.max_retry_delay:
break
if range_stop > self.max_retry_delay:
range_stop = self.max_retry_delay
return range_start + (range_stop - range_start) * self._random()
def get_retry_after(self, response):
"""Get the value of Retry-After header and append random jitter delay."""
retry_after = super().get_retry_after(response)
if retry_after:
retry_after += self._jitter_delay()
return retry_after
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
"""Return a new Retry object with incremented retry counters."""
if self.retry_timeout < datetime.now():
raise MaxRetryError(_pool, url, error or ResponseError("max_retry_time exceeded"))
new_retry = super().increment(method, url, response, error, _pool, _stacktrace)
if response is not None:
parsed_error = InfluxDBError(response=response)
elif error is not None:
parsed_error = error
else:
parsed_error = f"Failed request to: {url}"
message = f"The retriable error occurred during request. Reason: '{parsed_error}'."
if isinstance(parsed_error, InfluxDBError):
message += f" Retry in {parsed_error.retry_after}s."
if self.retry_callback:
self.retry_callback(parsed_error)
logger.warning(message)
return new_retry
def _jitter_delay(self):
return self.jitter_interval * random()
def _random(self):
return random()
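# Example (editor's sketch): a rough re-derivation of the delay ranges documented
# above for the defaults (retry_interval=5, exponential_base=2, max_retry_delay=125);
# it is illustrative only and not used by the client itself.
if __name__ == '__main__':
    _retry = WritesRetry()
    _start = _retry.retry_interval
    _stop = _retry.retry_interval * _retry.exponential_base
    for _attempt in range(1, _retry.total + 1):
        print(f"attempt {_attempt}: delay in [{_start}, {min(_stop, _retry.max_retry_delay)}]")
        _start, _stop = _stop, _stop * _retry.exponential_base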

View File

@@ -0,0 +1,587 @@
"""Collect and write time series data to InfluxDB Cloud or InfluxDB OSS."""
# coding: utf-8
import logging
import os
import warnings
from collections import defaultdict
from datetime import timedelta
from enum import Enum
from random import random
from time import sleep
from typing import Union, Any, Iterable, NamedTuple
import reactivex as rx
from reactivex import operators as ops, Observable
from reactivex.scheduler import ThreadPoolScheduler
from reactivex.subject import Subject
from influxdb_client import WritePrecision
from influxdb_client.client._base import _BaseWriteApi, _HAS_DATACLASS
from influxdb_client.client.util.helpers import get_org_query_param
from influxdb_client.client.write.dataframe_serializer import DataframeSerializer
from influxdb_client.client.write.point import Point, DEFAULT_WRITE_PRECISION
from influxdb_client.client.write.retry import WritesRetry
from influxdb_client.rest import _UTF_8_encoding
logger = logging.getLogger('influxdb_client.client.write_api')
if _HAS_DATACLASS:
import dataclasses
from dataclasses import dataclass
class WriteType(Enum):
"""Configuration which type of writes will client use."""
batching = 1
asynchronous = 2
synchronous = 3
class WriteOptions(object):
"""Write configuration."""
def __init__(self, write_type: WriteType = WriteType.batching,
batch_size=1_000, flush_interval=1_000,
jitter_interval=0,
retry_interval=5_000,
max_retries=5,
max_retry_delay=125_000,
max_retry_time=180_000,
exponential_base=2,
max_close_wait=300_000,
write_scheduler=ThreadPoolScheduler(max_workers=1)) -> None:
"""
Create write api configuration.
:param write_type: methods of write (batching, asynchronous, synchronous)
:param batch_size: the number of data points to collect in a batch
:param flush_interval: flush data at least in this interval (milliseconds)
:param jitter_interval: this is primarily to avoid large write spikes for users running a large number of
client instances, i.e., a jitter of 5s and flush duration 10s means flushes will happen every 10-15s
(milliseconds)
:param retry_interval: the time to wait before retrying an unsuccessful write (milliseconds)
:param max_retries: the number of max retries when write fails, 0 means retry is disabled
:param max_retry_delay: the maximum delay between each retry attempt in milliseconds
:param max_retry_time: total timeout for all retry attempts in milliseconds, if 0 retry is disabled
:param exponential_base: base for the exponential retry delay
:param max_close_wait: the maximum time to wait for writes to be flushed if close() is called
:param write_scheduler:
"""
self.write_type = write_type
self.batch_size = batch_size
self.flush_interval = flush_interval
self.jitter_interval = jitter_interval
self.retry_interval = retry_interval
self.max_retries = max_retries
self.max_retry_delay = max_retry_delay
self.max_retry_time = max_retry_time
self.exponential_base = exponential_base
self.write_scheduler = write_scheduler
self.max_close_wait = max_close_wait
def to_retry_strategy(self, **kwargs):
"""
Create a Retry strategy from write options.
:key retry_callback: The callable ``callback`` to run after a retryable error occurred.
The callable must accept one argument:
- `Exception`: a retryable error
"""
return WritesRetry(
total=self.max_retries,
retry_interval=self.retry_interval / 1_000,
jitter_interval=self.jitter_interval / 1_000,
max_retry_delay=self.max_retry_delay / 1_000,
max_retry_time=self.max_retry_time / 1_000,
exponential_base=self.exponential_base,
retry_callback=kwargs.get("retry_callback", None),
allowed_methods=["POST"])
def __getstate__(self):
"""Return a dict of attributes that you want to pickle."""
state = self.__dict__.copy()
# Remove write scheduler
del state['write_scheduler']
return state
def __setstate__(self, state):
"""Set your object with the provided dict."""
self.__dict__.update(state)
# Init default write Scheduler
self.write_scheduler = ThreadPoolScheduler(max_workers=1)
SYNCHRONOUS = WriteOptions(write_type=WriteType.synchronous)
ASYNCHRONOUS = WriteOptions(write_type=WriteType.asynchronous)
class PointSettings(object):
"""Settings to store default tags."""
def __init__(self, **default_tags) -> None:
"""
Create point settings for write api.
:param default_tags: Default tags which will be added to each point written by the API.
"""
self.defaultTags = dict()
for key, val in default_tags.items():
self.add_default_tag(key, val)
@staticmethod
def _get_value(value):
if value.startswith("${env."):
return os.environ.get(value[6:-1])
return value
def add_default_tag(self, key, value) -> None:
"""Add new default tag with key and value."""
self.defaultTags[key] = self._get_value(value)
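# Example (editor's sketch): default tag values may reference environment
# variables via the "${env.NAME}" syntax resolved by PointSettings._get_value:
#   PointSettings(data_center="${env.DATA_CENTER}", customer="california-miner")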
class _BatchItemKey(object):
def __init__(self, bucket, org, precision=DEFAULT_WRITE_PRECISION) -> None:
self.bucket = bucket
self.org = org
self.precision = precision
pass
def __hash__(self) -> int:
return hash((self.bucket, self.org, self.precision))
def __eq__(self, o: object) -> bool:
return isinstance(o, self.__class__) \
and self.bucket == o.bucket and self.org == o.org and self.precision == o.precision
def __str__(self) -> str:
return '_BatchItemKey[bucket:\'{}\', org:\'{}\', precision:\'{}\']' \
.format(str(self.bucket), str(self.org), str(self.precision))
class _BatchItem(object):
def __init__(self, key: _BatchItemKey, data, size=1) -> None:
self.key = key
self.data = data
self.size = size
pass
def to_key_tuple(self) -> (str, str, str):
return self.key.bucket, self.key.org, self.key.precision
def __str__(self) -> str:
return '_BatchItem[key:\'{}\', size: \'{}\']' \
.format(str(self.key), str(self.size))
class _BatchResponse(object):
def __init__(self, data: _BatchItem, exception: Exception = None):
self.data = data
self.exception = exception
pass
def __str__(self) -> str:
return '_BatchResponse[status:\'{}\', \'{}\']' \
.format("failed" if self.exception else "success", str(self.data))
def _body_reduce(batch_items):
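    # Concatenate the line-protocol payloads of all batch items in one window
    # into a single newline-separated request body.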
return b'\n'.join(map(lambda batch_item: batch_item.data, batch_items))
class WriteApi(_BaseWriteApi):
"""
Implementation for '/api/v2/write' endpoint.
Example:
.. code-block:: python
from influxdb_client import InfluxDBClient
from influxdb_client.client.write_api import SYNCHRONOUS
# Initialize SYNCHRONOUS instance of WriteApi
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
write_api = client.write_api(write_options=SYNCHRONOUS)
"""
def __init__(self,
influxdb_client,
write_options: WriteOptions = WriteOptions(),
point_settings: PointSettings = PointSettings(),
**kwargs) -> None:
"""
Initialize defaults.
:param influxdb_client: with default settings (organization)
:param write_options: write api configuration
:param point_settings: settings to store default tags.
:key success_callback: The callable ``callback`` to run after a batch has been successfully written.
The callable must accept two arguments:
- `Tuple`: ``(bucket, organization, precision)``
- `str`: written data
**[batching mode]**
:key error_callback: The callable ``callback`` to run after a batch could not be written.
The callable must accept three arguments:
- `Tuple`: ``(bucket, organization, precision)``
- `str`: written data
- `Exception`: the error that occurred
**[batching mode]**
:key retry_callback: The callable ``callback`` to run after a retryable error occurred.
The callable must accept three arguments:
- `Tuple`: ``(bucket, organization, precision)``
- `str`: written data
- `Exception`: a retryable error
**[batching mode]**
"""
super().__init__(influxdb_client=influxdb_client, point_settings=point_settings)
self._write_options = write_options
self._success_callback = kwargs.get('success_callback', None)
self._error_callback = kwargs.get('error_callback', None)
self._retry_callback = kwargs.get('retry_callback', None)
self._window_scheduler = None
if self._write_options.write_type is WriteType.batching:
# Define a Subject that listens for incoming data and produces writes into InfluxDB
self._subject = Subject()
self._window_scheduler = ThreadPoolScheduler(1)
self._disposable = self._subject.pipe(
# Split incoming data into windows by batch_size or flush_interval
ops.window_with_time_or_count(count=write_options.batch_size,
timespan=timedelta(milliseconds=write_options.flush_interval),
scheduler=self._window_scheduler),
# Map window into groups defined by 'organization', 'bucket' and 'precision'
ops.flat_map(lambda window: window.pipe(
# Group window by 'organization', 'bucket' and 'precision'
ops.group_by(lambda batch_item: batch_item.key),
# Create a batch (concatenate line protocols with \n)
ops.map(lambda group: group.pipe(
ops.to_iterable(),
ops.map(lambda xs: _BatchItem(key=group.key, data=_body_reduce(xs), size=len(xs))))),
ops.merge_all())),
# Write data into InfluxDB (with the possibility to retry if it fails)
ops.filter(lambda batch: batch.size > 0),
ops.map(mapper=lambda batch: self._to_response(data=batch, delay=self._jitter_delay())),
ops.merge_all()) \
.subscribe(self._on_next, self._on_error, self._on_complete)
else:
self._subject = None
self._disposable = None
if self._write_options.write_type is WriteType.asynchronous:
message = """The 'WriteType.asynchronous' is deprecated and will be removed in future major version.
You can use native asynchronous version of the client:
- https://influxdb-client.readthedocs.io/en/stable/usage.html#how-to-use-asyncio
"""
warnings.warn(message, DeprecationWarning)
def write(self, bucket: str, org: str = None,
record: Union[
str, Iterable['str'], Point, Iterable['Point'], dict, Iterable['dict'], bytes, Iterable['bytes'],
Observable, NamedTuple, Iterable['NamedTuple'], 'dataclass', Iterable['dataclass']
] = None,
write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, **kwargs) -> Any:
"""
Write time-series data into InfluxDB.
:param str bucket: specifies the destination bucket for writes (required)
:param str, Organization org: specifies the destination organization for writes;
takes the ID, Name or Organization.
If not specified, the default value from ``InfluxDBClient.org`` is used.
:param WritePrecision write_precision: specifies the precision for the unix timestamps within
the body line-protocol. The precision specified on a Point takes precedence
and is used for the write.
:param record: Point, Line Protocol, Dictionary, NamedTuple, Data Classes, Pandas DataFrame or
RxPY Observable to write
:key data_frame_measurement_name: name of measurement for writing Pandas DataFrame - ``DataFrame``
:key data_frame_tag_columns: list of DataFrame columns which are tags,
rest columns will be fields - ``DataFrame``
:key data_frame_timestamp_column: name of DataFrame column which contains a timestamp. The column can be defined as a :class:`~str` value
formatted as `2018-10-26`, `2018-10-26 12:00`, `2018-10-26 12:00:00-05:00`
or other formats and types supported by `pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_ - ``DataFrame``
:key data_frame_timestamp_timezone: name of the timezone which is used for timestamp column - ``DataFrame``
:key record_measurement_key: key of record with specified measurement -
``dictionary``, ``NamedTuple``, ``dataclass``
:key record_measurement_name: static measurement name - ``dictionary``, ``NamedTuple``, ``dataclass``
:key record_time_key: key of record with specified timestamp - ``dictionary``, ``NamedTuple``, ``dataclass``
:key record_tag_keys: list of record keys to use as a tag - ``dictionary``, ``NamedTuple``, ``dataclass``
:key record_field_keys: list of record keys to use as a field - ``dictionary``, ``NamedTuple``, ``dataclass``
Example:
.. code-block:: python
# Record as Line Protocol
write_api.write("my-bucket", "my-org", "h2o_feet,location=us-west level=125i 1")
# Record as Dictionary
dictionary = {
"measurement": "h2o_feet",
"tags": {"location": "us-west"},
"fields": {"level": 125},
"time": 1
}
write_api.write("my-bucket", "my-org", dictionary)
# Record as Point
from influxdb_client import Point
point = Point("h2o_feet").tag("location", "us-west").field("level", 125).time(1)
write_api.write("my-bucket", "my-org", point)
DataFrame:
If the ``data_frame_timestamp_column`` is not specified the index of `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
is used as a ``timestamp`` for written data. The index can be `PeriodIndex <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.PeriodIndex.html#pandas.PeriodIndex>`_
or it must be transformable to ``datetime`` by
`pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_.
If you would like to transform a column to ``PeriodIndex``, you can use something like:
.. code-block:: python
import pandas as pd
# DataFrame
data_frame = ...
# Set column as Index
data_frame.set_index('column_name', inplace=True)
# Transform index to PeriodIndex
data_frame.index = pd.to_datetime(data_frame.index, unit='s')
""" # noqa: E501
org = get_org_query_param(org=org, client=self._influxdb_client)
self._append_default_tags(record)
if self._write_options.write_type is WriteType.batching:
return self._write_batching(bucket, org, record,
write_precision, **kwargs)
payloads = defaultdict(list)
self._serialize(record, write_precision, payloads, **kwargs)
_async_req = True if self._write_options.write_type == WriteType.asynchronous else False
def write_payload(payload):
final_string = b'\n'.join(payload[1])
return self._post_write(_async_req, bucket, org, final_string, payload[0])
results = list(map(write_payload, payloads.items()))
if not _async_req:
return None
elif len(results) == 1:
return results[0]
return results
def flush(self):
"""Flush data."""
# TODO
pass
def close(self):
"""Flush data and dispose a batching buffer."""
self.__del__()
def __enter__(self):
"""
Enter the runtime context related to this object.
It will bind this method's return value to the target(s)
specified in the `as` clause of the statement.
:return: self instance
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Exit the runtime context related to this object and close the WriteApi."""
self.close()
def __del__(self):
"""Close WriteApi."""
if self._subject:
self._subject.on_completed()
self._subject.dispose()
self._subject = None
"""
We impose a maximum wait time to ensure that we do not cause a deadlock if the
background thread has exited abnormally
Each iteration waits 100ms, but sleep expects the unit to be seconds so convert
the maximum wait time to seconds.
We keep a counter of how long we've waited
"""
max_wait_time = self._write_options.max_close_wait / 1000
waited = 0
sleep_period = 0.1
# Wait for writing to finish
while not self._disposable.is_disposed:
sleep(sleep_period)
waited += sleep_period
# Have we reached the upper limit?
if waited >= max_wait_time:
logger.warning(
"Reached max_close_wait (%s seconds) waiting for batches to finish writing. Force closing",
max_wait_time
)
break
if self._window_scheduler:
self._window_scheduler.executor.shutdown(wait=False)
self._window_scheduler = None
if self._disposable:
self._disposable = None
pass
def _write_batching(self, bucket, org, data,
precision=DEFAULT_WRITE_PRECISION,
**kwargs):
if isinstance(data, bytes):
_key = _BatchItemKey(bucket, org, precision)
self._subject.on_next(_BatchItem(key=_key, data=data))
elif isinstance(data, str):
self._write_batching(bucket, org, data.encode(_UTF_8_encoding),
precision, **kwargs)
elif isinstance(data, Point):
self._write_batching(bucket, org, data.to_line_protocol(), data.write_precision, **kwargs)
elif isinstance(data, dict):
self._write_batching(bucket, org, Point.from_dict(data, write_precision=precision, **kwargs),
precision, **kwargs)
elif 'DataFrame' in type(data).__name__:
serializer = DataframeSerializer(data, self._point_settings, precision, self._write_options.batch_size,
**kwargs)
for chunk_idx in range(serializer.number_of_chunks):
self._write_batching(bucket, org,
serializer.serialize(chunk_idx),
precision, **kwargs)
elif hasattr(data, "_asdict"):
# noinspection PyProtectedMember
self._write_batching(bucket, org, data._asdict(), precision, **kwargs)
elif _HAS_DATACLASS and dataclasses.is_dataclass(data):
self._write_batching(bucket, org, dataclasses.asdict(data), precision, **kwargs)
elif isinstance(data, Iterable):
for item in data:
self._write_batching(bucket, org, item, precision, **kwargs)
elif isinstance(data, Observable):
data.subscribe(lambda it: self._write_batching(bucket, org, it, precision, **kwargs))
pass
return None
def _http(self, batch_item: _BatchItem):
logger.debug("Write time series data into InfluxDB: %s", batch_item)
if self._retry_callback:
def _retry_callback_delegate(exception):
return self._retry_callback(batch_item.to_key_tuple(), batch_item.data, exception)
else:
_retry_callback_delegate = None
retry = self._write_options.to_retry_strategy(retry_callback=_retry_callback_delegate)
self._post_write(False, batch_item.key.bucket, batch_item.key.org, batch_item.data,
batch_item.key.precision, urlopen_kw={'retries': retry})
logger.debug("Write request finished %s", batch_item)
return _BatchResponse(data=batch_item)
def _post_write(self, _async_req, bucket, org, body, precision, **kwargs):
return self._write_service.post_write(org=org, bucket=bucket, body=body, precision=precision,
async_req=_async_req,
content_type="text/plain; charset=utf-8",
**kwargs)
def _to_response(self, data: _BatchItem, delay: timedelta):
return rx.of(data).pipe(
ops.subscribe_on(self._write_options.write_scheduler),
# use delay if it's specified
ops.delay(duetime=delay, scheduler=self._write_options.write_scheduler),
# invoke http call
ops.map(lambda x: self._http(x)),
# catch exception to fail batch response
ops.catch(handler=lambda exception, source: rx.just(_BatchResponse(exception=exception, data=data))),
)
def _jitter_delay(self):
return timedelta(milliseconds=random() * self._write_options.jitter_interval)
def _on_next(self, response: _BatchResponse):
if response.exception:
logger.error("The batch item wasn't processed successfully because: %s", response.exception)
if self._error_callback:
try:
self._error_callback(response.data.to_key_tuple(), response.data.data, response.exception)
except Exception as e:
"""
Unfortunately, because callbacks are user-provided generic code, exceptions can be entirely
arbitrary
We trap it, log that it occurred and then proceed - there's not much more that we can
really do.
"""
logger.error("The configured error callback threw an exception: %s", e)
else:
logger.debug("The batch item: %s was processed successfully.", response)
if self._success_callback:
try:
self._success_callback(response.data.to_key_tuple(), response.data.data)
except Exception as e:
logger.error("The configured success callback threw an exception: %s", e)
@staticmethod
def _on_error(ex):
logger.error("unexpected error during batching: %s", ex)
def _on_complete(self):
self._disposable.dispose()
logger.info("the batching processor was disposed")
def __getstate__(self):
"""Return a dict of attributes that you want to pickle."""
state = self.__dict__.copy()
# Remove rx
del state['_subject']
del state['_disposable']
del state['_window_scheduler']
del state['_write_service']
return state
def __setstate__(self, state):
"""Set your object with the provided dict."""
self.__dict__.update(state)
# Init Rx
self.__init__(self._influxdb_client,
self._write_options,
self._point_settings,
success_callback=self._success_callback,
error_callback=self._error_callback,
retry_callback=self._retry_callback)
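# Example (editor's sketch): batching writes with success/error callbacks;
# URL, token, org and bucket are placeholders for a running InfluxDB instance.
if __name__ == '__main__':
    from influxdb_client import InfluxDBClient

    def _on_success(conf, data):
        print(f"Batch written: {conf}")

    def _on_error(conf, data, exception):
        print(f"Batch failed: {conf} due to: {exception}")

    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as _client:
        with _client.write_api(write_options=WriteOptions(batch_size=500, flush_interval=10_000),
                               success_callback=_on_success,
                               error_callback=_on_error) as _write_api:
            _write_api.write(bucket="my-bucket",
                             record="mem,host=host1 used_percent=23.43234543 1667898000000000000")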

View File

@@ -0,0 +1,125 @@
"""Collect and async write time series data to InfluxDB Cloud or InfluxDB OSS."""
import logging
from collections import defaultdict
from typing import Union, Iterable, NamedTuple
from influxdb_client import Point, WritePrecision
from influxdb_client.client._base import _BaseWriteApi, _HAS_DATACLASS
from influxdb_client.client.util.helpers import get_org_query_param
from influxdb_client.client.write.point import DEFAULT_WRITE_PRECISION
from influxdb_client.client.write_api import PointSettings
logger = logging.getLogger('influxdb_client.client.write_api_async')
if _HAS_DATACLASS:
from dataclasses import dataclass
class WriteApiAsync(_BaseWriteApi):
"""
Implementation for '/api/v2/write' endpoint.
Example:
.. code-block:: python
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
# Initialize async/await instance of Write API
async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
write_api = client.write_api()
"""
def __init__(self, influxdb_client, point_settings: PointSettings = PointSettings()) -> None:
"""
Initialize defaults.
:param influxdb_client: with default settings (organization)
:param point_settings: settings to store default tags.
"""
super().__init__(influxdb_client=influxdb_client, point_settings=point_settings)
async def write(self, bucket: str, org: str = None,
record: Union[str, Iterable['str'], Point, Iterable['Point'], dict, Iterable['dict'], bytes,
Iterable['bytes'], NamedTuple, Iterable['NamedTuple'], 'dataclass',
Iterable['dataclass']] = None,
write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, **kwargs) -> bool:
"""
Write time-series data into InfluxDB.
:param str bucket: specifies the destination bucket for writes (required)
:param str, Organization org: specifies the destination organization for writes;
takes the ID, Name or Organization.
If not specified, the default value from ``InfluxDBClientAsync.org`` is used.
:param WritePrecision write_precision: specifies the precision for the unix timestamps within
the body line-protocol. The precision specified on a Point takes precedence
and is used for the write.
:param record: Point, Line Protocol, Dictionary, NamedTuple, Data Classes, Pandas DataFrame
:key data_frame_measurement_name: name of measurement for writing Pandas DataFrame - ``DataFrame``
:key data_frame_tag_columns: list of DataFrame columns which are tags,
rest columns will be fields - ``DataFrame``
:key data_frame_timestamp_column: name of DataFrame column which contains a timestamp. The column can be defined as a :class:`~str` value
formatted as `2018-10-26`, `2018-10-26 12:00`, `2018-10-26 12:00:00-05:00`
or other formats and types supported by `pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_ - ``DataFrame``
:key data_frame_timestamp_timezone: name of the timezone which is used for timestamp column - ``DataFrame``
:key record_measurement_key: key of record with specified measurement -
``dictionary``, ``NamedTuple``, ``dataclass``
:key record_measurement_name: static measurement name - ``dictionary``, ``NamedTuple``, ``dataclass``
:key record_time_key: key of record with specified timestamp - ``dictionary``, ``NamedTuple``, ``dataclass``
:key record_tag_keys: list of record keys to use as a tag - ``dictionary``, ``NamedTuple``, ``dataclass``
:key record_field_keys: list of record keys to use as a field - ``dictionary``, ``NamedTuple``, ``dataclass``
:return: ``True`` if the data was successfully accepted, otherwise an exception is raised
Example:
.. code-block:: python
# Record as Line Protocol
await write_api.write("my-bucket", "my-org", "h2o_feet,location=us-west level=125i 1")
# Record as Dictionary
dictionary = {
"measurement": "h2o_feet",
"tags": {"location": "us-west"},
"fields": {"level": 125},
"time": 1
}
await write_api.write("my-bucket", "my-org", dictionary)
# Record as Point
from influxdb_client import Point
point = Point("h2o_feet").tag("location", "us-west").field("level", 125).time(1)
await write_api.write("my-bucket", "my-org", point)
DataFrame:
If the ``data_frame_timestamp_column`` is not specified the index of `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
is used as a ``timestamp`` for written data. The index can be `PeriodIndex <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.PeriodIndex.html#pandas.PeriodIndex>`_
or it must be transformable to ``datetime`` by
`pandas.to_datetime <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html#pandas.to_datetime>`_.
If you would like to transform a column to ``PeriodIndex``, you can use something like:
.. code-block:: python
import pandas as pd
# DataFrame
data_frame = ...
# Set column as Index
data_frame.set_index('column_name', inplace=True)
# Transform index to PeriodIndex
data_frame.index = pd.to_datetime(data_frame.index, unit='s')
""" # noqa: E501
org = get_org_query_param(org=org, client=self._influxdb_client)
self._append_default_tags(record)
payloads = defaultdict(list)
self._serialize(record, write_precision, payloads, precision_from_point=False, **kwargs)
# join the list by \n
body = b'\n'.join(payloads[write_precision])
response = await self._write_service.post_write_async(org=org, bucket=bucket, body=body,
precision=write_precision, async_req=False,
_return_http_data_only=False,
content_type="text/plain; charset=utf-8")
return response[1] in (201, 204)
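# Example (editor's sketch): awaiting a write with the async API; URL, token,
# org and bucket are placeholders for a running InfluxDB instance.
if __name__ == '__main__':
    import asyncio
    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync

    async def _main():
        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
            _accepted = await client.write_api().write(
                bucket="my-bucket",
                record="mem,host=host1 used_percent=23.43234543 1667898000000000000")
            print(f"accepted: {_accepted}")

    asyncio.run(_main())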

View File

@@ -0,0 +1,283 @@
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
class TypeWithDefault(type):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(cls, name, bases, dct):
"""Initialize with defaults."""
super(TypeWithDefault, cls).__init__(name, bases, dct)
cls._default = None
def __call__(cls):
"""Call self as a function."""
if cls._default is None:
cls._default = type.__call__(cls)
return copy.copy(cls._default)
def set_default(cls, default):
"""Set dafaults."""
cls._default = copy.copy(default)
class Configuration(object, metaclass=TypeWithDefault):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self):
"""Initialize configuration."""
# Default Base url
self.host = "http://localhost/api/v2"
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# Logging Settings
self.loggers = {}
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API
# from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.cert_key_file = None
# client key file password
self.cert_key_password = None
# Set this to True/False to enable/disable SSL hostname verification.
self.assert_hostname = None
# Set this to specify a custom ssl context to inject this context inside the urllib3 connection pool.
self.ssl_context = None
# urllib3 connection pool's maximum number of connections saved
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Timeout setting for a request. If one number provided, it will be total request timeout.
# It can also be a pair (tuple) of (connection, read) timeouts.
self.timeout = None
# Set to True/False to enable basic authentication when using a proxied InfluxDB 1.8.x with no auth enabled
self.auth_basic = False
# Proxy URL
self.proxy = None
# A dictionary containing headers that will be sent to the proxy
self.proxy_headers = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@property
def logger_file(self):
"""Logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""Logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If a logging file is set,
# then add a file handler and remove the stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in self.loggers.items():
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status.
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status.
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for name, logger in self.loggers.items():
logger.setLevel(logging.DEBUG)
if name == 'influxdb_client.client.http':
# make sure not to duplicate the stdout handler
if not any(map(lambda h: isinstance(h, logging.StreamHandler) and h.stream == sys.stdout,
logger.handlers)):
logger.addHandler(logging.StreamHandler(sys.stdout))
# we use 'influxdb_client.client.http' logger instead of this
# httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in self.loggers.items():
logger.setLevel(logging.WARNING)
# we use 'influxdb_client.client.http' logger instead of this
# httplib.HTTPConnection.debuglevel = 0
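# Example (assumed caller usage): ``conf.debug = True`` raises every registered
# logger to DEBUG and gives 'influxdb_client.client.http' a single stdout
# StreamHandler so raw HTTP traffic becomes visible; ``conf.debug = False``
# drops all registered loggers back to WARNING.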
@property
def logger_format(self):
"""Logger format.
The logger_formatter is rebuilt whenever logger_format is set.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""Logger format.
The logger_formatter is rebuilt whenever logger_format is set.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
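# Example (assumed caller usage): assigning a new format string rebuilds the
# formatter used for subsequently attached handlers:
#
#     conf.logger_format = '%(asctime)s | %(name)s | %(message)s'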
def get_api_key_with_prefix(self, identifier):
"""Get API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if (self.api_key.get(identifier) and
self.api_key_prefix.get(identifier)):
return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier] # noqa: E501
elif self.api_key.get(identifier):
return self.api_key[identifier]
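# Example (illustrative values): with
#
#     conf.api_key['Authorization'] = 'my-token'
#     conf.api_key_prefix['Authorization'] = 'Token'
#
# get_api_key_with_prefix('Authorization') returns 'Token my-token'; without a
# prefix it returns the bare key, and with no key at all it returns None.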
def get_basic_auth_token(self):
"""Get HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
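# Example: urllib3.util.make_headers(basic_auth='user:pass') base64-encodes the
# pair, so this returns a string such as 'Basic dXNlcjpwYXNz'.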
def auth_settings(self):
"""Get Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
'BasicAuthentication':
{
'type': 'basic',
'in': 'header',
'key': 'Authorization',
'value': self.get_basic_auth_token()
},
'TokenAuthentication':
{
'type': 'api_key',
'in': 'header',
'key': 'Authorization',
'value': self.get_api_key_with_prefix('Authorization')
},
}
def to_debug_report(self):
"""Get the essential information for debugging.
:return: The report for debugging.
"""
from influxdb_client import VERSION
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 2.0.0\n"\
"SDK Package Version: {client_version}".\
format(env=sys.platform, pyversion=sys.version, client_version=VERSION)
def update_request_header_params(self, path: str, params: dict):
"""Update header params based on custom settings.
:param path: Resource path
:param params: Header parameters dict to be updated.
"""
pass
def update_request_body(self, path: str, body):
"""Update http body based on custom settings.
:param path: Resource path
:param body: Request body to be updated.
:return: Updated body
"""
return body
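# Hook sketch (assumed subclass, not part of the generated module): the two
# update_request_* methods are deliberate no-ops so a subclass can rewrite
# outgoing requests, for example:
#
#     class TracingConfiguration(Configuration):
#         def update_request_header_params(self, path: str, params: dict):
#             params.setdefault('X-Trace-Id', 'example-id')  # hypothetical header
#
#         def update_request_body(self, path: str, body):
#             return body  # payload could be transformed here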

View File

@@ -0,0 +1,335 @@
# coding: utf-8
# flake8: noqa
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
# import models into model package
from influxdb_client.domain.ast_response import ASTResponse
from influxdb_client.domain.add_resource_member_request_body import AddResourceMemberRequestBody
from influxdb_client.domain.analyze_query_response import AnalyzeQueryResponse
from influxdb_client.domain.analyze_query_response_errors import AnalyzeQueryResponseErrors
from influxdb_client.domain.array_expression import ArrayExpression
from influxdb_client.domain.authorization import Authorization
from influxdb_client.domain.authorization_post_request import AuthorizationPostRequest
from influxdb_client.domain.authorization_update_request import AuthorizationUpdateRequest
from influxdb_client.domain.authorizations import Authorizations
from influxdb_client.domain.axes import Axes
from influxdb_client.domain.axis import Axis
from influxdb_client.domain.axis_scale import AxisScale
from influxdb_client.domain.bad_statement import BadStatement
from influxdb_client.domain.band_view_properties import BandViewProperties
from influxdb_client.domain.binary_expression import BinaryExpression
from influxdb_client.domain.block import Block
from influxdb_client.domain.boolean_literal import BooleanLiteral
from influxdb_client.domain.bucket import Bucket
from influxdb_client.domain.bucket_links import BucketLinks
from influxdb_client.domain.bucket_metadata_manifest import BucketMetadataManifest
from influxdb_client.domain.bucket_retention_rules import BucketRetentionRules
from influxdb_client.domain.bucket_shard_mapping import BucketShardMapping
from influxdb_client.domain.buckets import Buckets
from influxdb_client.domain.builder_aggregate_function_type import BuilderAggregateFunctionType
from influxdb_client.domain.builder_config import BuilderConfig
from influxdb_client.domain.builder_config_aggregate_window import BuilderConfigAggregateWindow
from influxdb_client.domain.builder_functions_type import BuilderFunctionsType
from influxdb_client.domain.builder_tags_type import BuilderTagsType
from influxdb_client.domain.builtin_statement import BuiltinStatement
from influxdb_client.domain.call_expression import CallExpression
from influxdb_client.domain.cell import Cell
from influxdb_client.domain.cell_links import CellLinks
from influxdb_client.domain.cell_update import CellUpdate
from influxdb_client.domain.cell_with_view_properties import CellWithViewProperties
from influxdb_client.domain.check import Check
from influxdb_client.domain.check_base import CheckBase
from influxdb_client.domain.check_base_links import CheckBaseLinks
from influxdb_client.domain.check_discriminator import CheckDiscriminator
from influxdb_client.domain.check_patch import CheckPatch
from influxdb_client.domain.check_status_level import CheckStatusLevel
from influxdb_client.domain.check_view_properties import CheckViewProperties
from influxdb_client.domain.checks import Checks
from influxdb_client.domain.column_data_type import ColumnDataType
from influxdb_client.domain.column_semantic_type import ColumnSemanticType
from influxdb_client.domain.conditional_expression import ConditionalExpression
from influxdb_client.domain.config import Config
from influxdb_client.domain.constant_variable_properties import ConstantVariableProperties
from influxdb_client.domain.create_cell import CreateCell
from influxdb_client.domain.create_dashboard_request import CreateDashboardRequest
from influxdb_client.domain.custom_check import CustomCheck
from influxdb_client.domain.dbrp import DBRP
from influxdb_client.domain.dbrp_create import DBRPCreate
from influxdb_client.domain.dbrp_get import DBRPGet
from influxdb_client.domain.dbrp_update import DBRPUpdate
from influxdb_client.domain.dbr_ps import DBRPs
from influxdb_client.domain.dashboard import Dashboard
from influxdb_client.domain.dashboard_color import DashboardColor
from influxdb_client.domain.dashboard_query import DashboardQuery
from influxdb_client.domain.dashboard_with_view_properties import DashboardWithViewProperties
from influxdb_client.domain.dashboards import Dashboards
from influxdb_client.domain.date_time_literal import DateTimeLiteral
from influxdb_client.domain.deadman_check import DeadmanCheck
from influxdb_client.domain.decimal_places import DecimalPlaces
from influxdb_client.domain.delete_predicate_request import DeletePredicateRequest
from influxdb_client.domain.dialect import Dialect
from influxdb_client.domain.dict_expression import DictExpression
from influxdb_client.domain.dict_item import DictItem
from influxdb_client.domain.duration import Duration
from influxdb_client.domain.duration_literal import DurationLiteral
from influxdb_client.domain.error import Error
from influxdb_client.domain.expression import Expression
from influxdb_client.domain.expression_statement import ExpressionStatement
from influxdb_client.domain.field import Field
from influxdb_client.domain.file import File
from influxdb_client.domain.float_literal import FloatLiteral
from influxdb_client.domain.flux_response import FluxResponse
from influxdb_client.domain.flux_suggestion import FluxSuggestion
from influxdb_client.domain.flux_suggestions import FluxSuggestions
from influxdb_client.domain.function_expression import FunctionExpression
from influxdb_client.domain.gauge_view_properties import GaugeViewProperties
from influxdb_client.domain.greater_threshold import GreaterThreshold
from influxdb_client.domain.http_notification_endpoint import HTTPNotificationEndpoint
from influxdb_client.domain.http_notification_rule import HTTPNotificationRule
from influxdb_client.domain.http_notification_rule_base import HTTPNotificationRuleBase
from influxdb_client.domain.health_check import HealthCheck
from influxdb_client.domain.heatmap_view_properties import HeatmapViewProperties
from influxdb_client.domain.histogram_view_properties import HistogramViewProperties
from influxdb_client.domain.identifier import Identifier
from influxdb_client.domain.import_declaration import ImportDeclaration
from influxdb_client.domain.index_expression import IndexExpression
from influxdb_client.domain.integer_literal import IntegerLiteral
from influxdb_client.domain.is_onboarding import IsOnboarding
from influxdb_client.domain.label import Label
from influxdb_client.domain.label_create_request import LabelCreateRequest
from influxdb_client.domain.label_mapping import LabelMapping
from influxdb_client.domain.label_response import LabelResponse
from influxdb_client.domain.label_update import LabelUpdate
from influxdb_client.domain.labels_response import LabelsResponse
from influxdb_client.domain.language_request import LanguageRequest
from influxdb_client.domain.legacy_authorization_post_request import LegacyAuthorizationPostRequest
from influxdb_client.domain.lesser_threshold import LesserThreshold
from influxdb_client.domain.line_plus_single_stat_properties import LinePlusSingleStatProperties
from influxdb_client.domain.line_protocol_error import LineProtocolError
from influxdb_client.domain.line_protocol_length_error import LineProtocolLengthError
from influxdb_client.domain.links import Links
from influxdb_client.domain.list_stacks_response import ListStacksResponse
from influxdb_client.domain.log_event import LogEvent
from influxdb_client.domain.logical_expression import LogicalExpression
from influxdb_client.domain.logs import Logs
from influxdb_client.domain.map_variable_properties import MapVariableProperties
from influxdb_client.domain.markdown_view_properties import MarkdownViewProperties
from influxdb_client.domain.measurement_schema import MeasurementSchema
from influxdb_client.domain.measurement_schema_column import MeasurementSchemaColumn
from influxdb_client.domain.measurement_schema_create_request import MeasurementSchemaCreateRequest
from influxdb_client.domain.measurement_schema_list import MeasurementSchemaList
from influxdb_client.domain.measurement_schema_update_request import MeasurementSchemaUpdateRequest
from influxdb_client.domain.member_assignment import MemberAssignment
from influxdb_client.domain.member_expression import MemberExpression
from influxdb_client.domain.metadata_backup import MetadataBackup
from influxdb_client.domain.model_property import ModelProperty
from influxdb_client.domain.mosaic_view_properties import MosaicViewProperties
from influxdb_client.domain.node import Node
from influxdb_client.domain.notification_endpoint import NotificationEndpoint
from influxdb_client.domain.notification_endpoint_base import NotificationEndpointBase
from influxdb_client.domain.notification_endpoint_base_links import NotificationEndpointBaseLinks
from influxdb_client.domain.notification_endpoint_discriminator import NotificationEndpointDiscriminator
from influxdb_client.domain.notification_endpoint_type import NotificationEndpointType
from influxdb_client.domain.notification_endpoint_update import NotificationEndpointUpdate
from influxdb_client.domain.notification_endpoints import NotificationEndpoints
from influxdb_client.domain.notification_rule import NotificationRule
from influxdb_client.domain.notification_rule_base import NotificationRuleBase
from influxdb_client.domain.notification_rule_base_links import NotificationRuleBaseLinks
from influxdb_client.domain.notification_rule_discriminator import NotificationRuleDiscriminator
from influxdb_client.domain.notification_rule_update import NotificationRuleUpdate
from influxdb_client.domain.notification_rules import NotificationRules
from influxdb_client.domain.object_expression import ObjectExpression
from influxdb_client.domain.onboarding_request import OnboardingRequest
from influxdb_client.domain.onboarding_response import OnboardingResponse
from influxdb_client.domain.option_statement import OptionStatement
from influxdb_client.domain.organization import Organization
from influxdb_client.domain.organization_links import OrganizationLinks
from influxdb_client.domain.organizations import Organizations
from influxdb_client.domain.package import Package
from influxdb_client.domain.package_clause import PackageClause
from influxdb_client.domain.pager_duty_notification_endpoint import PagerDutyNotificationEndpoint
from influxdb_client.domain.pager_duty_notification_rule import PagerDutyNotificationRule
from influxdb_client.domain.pager_duty_notification_rule_base import PagerDutyNotificationRuleBase
from influxdb_client.domain.paren_expression import ParenExpression
from influxdb_client.domain.password_reset_body import PasswordResetBody
from influxdb_client.domain.patch_bucket_request import PatchBucketRequest
from influxdb_client.domain.patch_dashboard_request import PatchDashboardRequest
from influxdb_client.domain.patch_organization_request import PatchOrganizationRequest
from influxdb_client.domain.patch_retention_rule import PatchRetentionRule
from influxdb_client.domain.patch_stack_request import PatchStackRequest
from influxdb_client.domain.patch_stack_request_additional_resources import PatchStackRequestAdditionalResources
from influxdb_client.domain.permission import Permission
from influxdb_client.domain.permission_resource import PermissionResource
from influxdb_client.domain.pipe_expression import PipeExpression
from influxdb_client.domain.pipe_literal import PipeLiteral
from influxdb_client.domain.post_bucket_request import PostBucketRequest
from influxdb_client.domain.post_check import PostCheck
from influxdb_client.domain.post_notification_endpoint import PostNotificationEndpoint
from influxdb_client.domain.post_notification_rule import PostNotificationRule
from influxdb_client.domain.post_organization_request import PostOrganizationRequest
from influxdb_client.domain.post_restore_kv_response import PostRestoreKVResponse
from influxdb_client.domain.post_stack_request import PostStackRequest
from influxdb_client.domain.property_key import PropertyKey
from influxdb_client.domain.query import Query
from influxdb_client.domain.query_edit_mode import QueryEditMode
from influxdb_client.domain.query_variable_properties import QueryVariableProperties
from influxdb_client.domain.query_variable_properties_values import QueryVariablePropertiesValues
from influxdb_client.domain.range_threshold import RangeThreshold
from influxdb_client.domain.ready import Ready
from influxdb_client.domain.regexp_literal import RegexpLiteral
from influxdb_client.domain.remote_connection import RemoteConnection
from influxdb_client.domain.remote_connection_creation_request import RemoteConnectionCreationRequest
from influxdb_client.domain.remote_connection_update_request import RemoteConnectionUpdateRequest
from influxdb_client.domain.remote_connections import RemoteConnections
from influxdb_client.domain.renamable_field import RenamableField
from influxdb_client.domain.replication import Replication
from influxdb_client.domain.replication_creation_request import ReplicationCreationRequest
from influxdb_client.domain.replication_update_request import ReplicationUpdateRequest
from influxdb_client.domain.replications import Replications
from influxdb_client.domain.resource_member import ResourceMember
from influxdb_client.domain.resource_members import ResourceMembers
from influxdb_client.domain.resource_members_links import ResourceMembersLinks
from influxdb_client.domain.resource_owner import ResourceOwner
from influxdb_client.domain.resource_owners import ResourceOwners
from influxdb_client.domain.restored_bucket_mappings import RestoredBucketMappings
from influxdb_client.domain.retention_policy_manifest import RetentionPolicyManifest
from influxdb_client.domain.return_statement import ReturnStatement
from influxdb_client.domain.routes import Routes
from influxdb_client.domain.routes_external import RoutesExternal
from influxdb_client.domain.routes_query import RoutesQuery
from influxdb_client.domain.routes_system import RoutesSystem
from influxdb_client.domain.rule_status_level import RuleStatusLevel
from influxdb_client.domain.run import Run
from influxdb_client.domain.run_links import RunLinks
from influxdb_client.domain.run_manually import RunManually
from influxdb_client.domain.runs import Runs
from influxdb_client.domain.smtp_notification_rule import SMTPNotificationRule
from influxdb_client.domain.smtp_notification_rule_base import SMTPNotificationRuleBase
from influxdb_client.domain.scatter_view_properties import ScatterViewProperties
from influxdb_client.domain.schema_type import SchemaType
from influxdb_client.domain.scraper_target_request import ScraperTargetRequest
from influxdb_client.domain.scraper_target_response import ScraperTargetResponse
from influxdb_client.domain.scraper_target_responses import ScraperTargetResponses
from influxdb_client.domain.script import Script
from influxdb_client.domain.script_create_request import ScriptCreateRequest
from influxdb_client.domain.script_invocation_params import ScriptInvocationParams
from influxdb_client.domain.script_language import ScriptLanguage
from influxdb_client.domain.script_update_request import ScriptUpdateRequest
from influxdb_client.domain.scripts import Scripts
from influxdb_client.domain.secret_keys import SecretKeys
from influxdb_client.domain.secret_keys_response import SecretKeysResponse
from influxdb_client.domain.shard_group_manifest import ShardGroupManifest
from influxdb_client.domain.shard_manifest import ShardManifest
from influxdb_client.domain.shard_owner import ShardOwner
from influxdb_client.domain.simple_table_view_properties import SimpleTableViewProperties
from influxdb_client.domain.single_stat_view_properties import SingleStatViewProperties
from influxdb_client.domain.slack_notification_endpoint import SlackNotificationEndpoint
from influxdb_client.domain.slack_notification_rule import SlackNotificationRule
from influxdb_client.domain.slack_notification_rule_base import SlackNotificationRuleBase
from influxdb_client.domain.source import Source
from influxdb_client.domain.source_links import SourceLinks
from influxdb_client.domain.sources import Sources
from influxdb_client.domain.stack import Stack
from influxdb_client.domain.stack_associations import StackAssociations
from influxdb_client.domain.stack_events import StackEvents
from influxdb_client.domain.stack_links import StackLinks
from influxdb_client.domain.stack_resources import StackResources
from influxdb_client.domain.statement import Statement
from influxdb_client.domain.static_legend import StaticLegend
from influxdb_client.domain.status_rule import StatusRule
from influxdb_client.domain.string_literal import StringLiteral
from influxdb_client.domain.subscription_manifest import SubscriptionManifest
from influxdb_client.domain.table_view_properties import TableViewProperties
from influxdb_client.domain.table_view_properties_table_options import TableViewPropertiesTableOptions
from influxdb_client.domain.tag_rule import TagRule
from influxdb_client.domain.task import Task
from influxdb_client.domain.task_create_request import TaskCreateRequest
from influxdb_client.domain.task_links import TaskLinks
from influxdb_client.domain.task_status_type import TaskStatusType
from influxdb_client.domain.task_update_request import TaskUpdateRequest
from influxdb_client.domain.tasks import Tasks
from influxdb_client.domain.telegraf import Telegraf
from influxdb_client.domain.telegraf_plugin import TelegrafPlugin
from influxdb_client.domain.telegraf_plugin_request import TelegrafPluginRequest
from influxdb_client.domain.telegraf_plugin_request_plugins import TelegrafPluginRequestPlugins
from influxdb_client.domain.telegraf_plugins import TelegrafPlugins
from influxdb_client.domain.telegraf_request import TelegrafRequest
from influxdb_client.domain.telegraf_request_metadata import TelegrafRequestMetadata
from influxdb_client.domain.telegrafs import Telegrafs
from influxdb_client.domain.telegram_notification_endpoint import TelegramNotificationEndpoint
from influxdb_client.domain.telegram_notification_rule import TelegramNotificationRule
from influxdb_client.domain.telegram_notification_rule_base import TelegramNotificationRuleBase
from influxdb_client.domain.template_apply import TemplateApply
from influxdb_client.domain.template_apply_remotes import TemplateApplyRemotes
from influxdb_client.domain.template_apply_template import TemplateApplyTemplate
from influxdb_client.domain.template_chart import TemplateChart
from influxdb_client.domain.template_export_by_id import TemplateExportByID
from influxdb_client.domain.template_export_by_id_org_ids import TemplateExportByIDOrgIDs
from influxdb_client.domain.template_export_by_id_resource_filters import TemplateExportByIDResourceFilters
from influxdb_client.domain.template_export_by_id_resources import TemplateExportByIDResources
from influxdb_client.domain.template_export_by_name import TemplateExportByName
from influxdb_client.domain.template_export_by_name_resources import TemplateExportByNameResources
from influxdb_client.domain.template_kind import TemplateKind
from influxdb_client.domain.template_summary import TemplateSummary
from influxdb_client.domain.template_summary_diff import TemplateSummaryDiff
from influxdb_client.domain.template_summary_diff_buckets import TemplateSummaryDiffBuckets
from influxdb_client.domain.template_summary_diff_buckets_new_old import TemplateSummaryDiffBucketsNewOld
from influxdb_client.domain.template_summary_diff_checks import TemplateSummaryDiffChecks
from influxdb_client.domain.template_summary_diff_dashboards import TemplateSummaryDiffDashboards
from influxdb_client.domain.template_summary_diff_dashboards_new_old import TemplateSummaryDiffDashboardsNewOld
from influxdb_client.domain.template_summary_diff_label_mappings import TemplateSummaryDiffLabelMappings
from influxdb_client.domain.template_summary_diff_labels import TemplateSummaryDiffLabels
from influxdb_client.domain.template_summary_diff_labels_new_old import TemplateSummaryDiffLabelsNewOld
from influxdb_client.domain.template_summary_diff_notification_endpoints import TemplateSummaryDiffNotificationEndpoints
from influxdb_client.domain.template_summary_diff_notification_rules import TemplateSummaryDiffNotificationRules
from influxdb_client.domain.template_summary_diff_notification_rules_new_old import TemplateSummaryDiffNotificationRulesNewOld
from influxdb_client.domain.template_summary_diff_tasks import TemplateSummaryDiffTasks
from influxdb_client.domain.template_summary_diff_tasks_new_old import TemplateSummaryDiffTasksNewOld
from influxdb_client.domain.template_summary_diff_telegraf_configs import TemplateSummaryDiffTelegrafConfigs
from influxdb_client.domain.template_summary_diff_variables import TemplateSummaryDiffVariables
from influxdb_client.domain.template_summary_diff_variables_new_old import TemplateSummaryDiffVariablesNewOld
from influxdb_client.domain.template_summary_errors import TemplateSummaryErrors
from influxdb_client.domain.template_summary_label import TemplateSummaryLabel
from influxdb_client.domain.template_summary_label_properties import TemplateSummaryLabelProperties
from influxdb_client.domain.template_summary_summary import TemplateSummarySummary
from influxdb_client.domain.template_summary_summary_buckets import TemplateSummarySummaryBuckets
from influxdb_client.domain.template_summary_summary_dashboards import TemplateSummarySummaryDashboards
from influxdb_client.domain.template_summary_summary_label_mappings import TemplateSummarySummaryLabelMappings
from influxdb_client.domain.template_summary_summary_notification_rules import TemplateSummarySummaryNotificationRules
from influxdb_client.domain.template_summary_summary_status_rules import TemplateSummarySummaryStatusRules
from influxdb_client.domain.template_summary_summary_tag_rules import TemplateSummarySummaryTagRules
from influxdb_client.domain.template_summary_summary_tasks import TemplateSummarySummaryTasks
from influxdb_client.domain.template_summary_summary_variables import TemplateSummarySummaryVariables
from influxdb_client.domain.test_statement import TestStatement
from influxdb_client.domain.threshold import Threshold
from influxdb_client.domain.threshold_base import ThresholdBase
from influxdb_client.domain.threshold_check import ThresholdCheck
from influxdb_client.domain.unary_expression import UnaryExpression
from influxdb_client.domain.unsigned_integer_literal import UnsignedIntegerLiteral
from influxdb_client.domain.user import User
from influxdb_client.domain.user_response import UserResponse
from influxdb_client.domain.user_response_links import UserResponseLinks
from influxdb_client.domain.users import Users
from influxdb_client.domain.variable import Variable
from influxdb_client.domain.variable_assignment import VariableAssignment
from influxdb_client.domain.variable_links import VariableLinks
from influxdb_client.domain.variable_properties import VariableProperties
from influxdb_client.domain.variables import Variables
from influxdb_client.domain.view import View
from influxdb_client.domain.view_links import ViewLinks
from influxdb_client.domain.view_properties import ViewProperties
from influxdb_client.domain.views import Views
from influxdb_client.domain.write_precision import WritePrecision
from influxdb_client.domain.xy_geom import XYGeom
from influxdb_client.domain.xy_view_properties import XYViewProperties
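# Usage sketch (illustrative; assumes the generated constructors accept keyword
# arguments for their properties): the domain models are plain classes, e.g. a
# retention rule for bucket creation:
#
#     from influxdb_client.domain.bucket_retention_rules import BucketRetentionRules
#     rule = BucketRetentionRules(type='expire', every_seconds=3600)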

Some files were not shown because too many files have changed in this diff.