Dataset schema (column, dtype, observed min/max; for string columns the bounds are character lengths). Each record below lists its scalar fields, then the two code fields, then the 29 metric values in the column order given here.

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 843k |
| repository_name | string (length) | 7 | 55 |
| file_path | string (length) | 9 | 332 |
| class_name | string (length) | 3 | 290 |
| human_written_code | string (length) | 12 | 4.36M |
| class_skeleton | string (length) | 19 | 2.2M |
| total_program_units | int64 | 1 | 9.57k |
| total_doc_str | int64 | 0 | 4.2k |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 300 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 176 |
| CountClassBase | float64 | 0 | 48 |
| CountClassCoupled | float64 | 0 | 589 |
| CountClassCoupledModified | float64 | 0 | 581 |
| CountClassDerived | float64 | 0 | 5.37k |
| CountDeclInstanceMethod | float64 | 0 | 4.2k |
| CountDeclInstanceVariable | float64 | 0 | 299 |
| CountDeclMethod | float64 | 0 | 4.2k |
| CountDeclMethodAll | float64 | 0 | 4.2k |
| CountLine | float64 | 1 | 115k |
| CountLineBlank | float64 | 0 | 9.01k |
| CountLineCode | float64 | 0 | 94.4k |
| CountLineCodeDecl | float64 | 0 | 46.1k |
| CountLineCodeExe | float64 | 0 | 91.3k |
| CountLineComment | float64 | 0 | 27k |
| CountStmt | float64 | 1 | 93.2k |
| CountStmtDecl | float64 | 0 | 46.1k |
| CountStmtExe | float64 | 0 | 90.2k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 6k |
id: 10,800
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.PrivateEndpointConnectionResourceListResult
human_written_code:
class PrivateEndpointConnectionResourceListResult(_serialization.Model): # pylint: disable=name-too-long
"""The response of a PrivateEndpointConnectionResource list operation.
All required parameters must be populated in order to send to server.
:ivar value: The PrivateEndpointConnectionResource items on this page. Required.
:vartype value: list[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:ivar next_link: The link to the next page of items.
:vartype next_link: str
"""
_validation = {
"value": {"required": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[PrivateEndpointConnectionResource]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: List["_models.PrivateEndpointConnectionResource"],
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: The PrivateEndpointConnectionResource items on this page. Required.
:paramtype value: list[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:keyword next_link: The link to the next page of items.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class_skeleton:
class PrivateEndpointConnectionResourceListResult(_serialization.Model):
'''The response of a PrivateEndpointConnectionResource list operation.
All required parameters must be populated in order to send to server.
:ivar value: The PrivateEndpointConnectionResource items on this page. Required.
:vartype value: list[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:ivar next_link: The link to the next page of items.
:vartype next_link: str
'''
def __init__(
self,
*,
value: List["_models.PrivateEndpointConnectionResource"],
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
'''
:keyword value: The PrivateEndpointConnectionResource items on this page. Required.
:paramtype value: list[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:keyword next_link: The link to the next page of items.
:paramtype next_link: str
'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 16 | 0 | 10 | 6 | 1 | 0.78 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 16 | 36 | 5 | 18 | 12 | 10 | 14 | 7 | 6 | 5 | 1 | 2 | 0 | 1
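A note on the generated-model pattern above (not part of the dataset row): `_attribute_map` maps each Python attribute to its wire key and type (e.g. `next_link` to `nextLink`), and `_validation` marks required or read-only fields. A minimal usage sketch, assuming the vendored `_serialization.Model` exposes the usual msrest-style `as_dict()` helper:

```python
# Illustrative sketch only; assumes the vendored _serialization.Model
# provides the msrest-style as_dict() helper. An empty page keeps the
# example self-contained.
page = PrivateEndpointConnectionResourceListResult(value=[])

# _attribute_map renames next_link -> "nextLink" on the wire; fields
# left as None are typically omitted from the serialized output.
print(page.as_dict())  # e.g. {'value': []}
```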
id: 10,801
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.PrivateLinkResource
human_written_code:
class PrivateLinkResource(ProxyResource):
"""Concrete proxy resource types can be created by aliasing this type using a specific property
type.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
:ivar properties: The resource-specific properties for this resource.
:vartype properties: ~azure.mgmt.mongocluster.models.PrivateLinkResourceProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"properties": {"key": "properties", "type": "PrivateLinkResourceProperties"},
}
def __init__(self, *, properties: Optional["_models.PrivateLinkResourceProperties"] = None, **kwargs: Any) -> None:
"""
:keyword properties: The resource-specific properties for this resource.
:paramtype properties: ~azure.mgmt.mongocluster.models.PrivateLinkResourceProperties
"""
super().__init__(**kwargs)
self.properties = properties
class_skeleton:
class PrivateLinkResource(ProxyResource):
'''Concrete proxy resource types can be created by aliasing this type using a specific property
type.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
:ivar properties: The resource-specific properties for this resource.
:vartype properties: ~azure.mgmt.mongocluster.models.PrivateLinkResourceProperties
'''
def __init__(self, *, properties: Optional["_models.PrivateLinkResourceProperties"] = None, **kwargs: Any) -> None:
'''
:keyword properties: The resource-specific properties for this resource.
:paramtype properties: ~azure.mgmt.mongocluster.models.PrivateLinkResourceProperties
'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 7 | 0 | 3 | 4 | 1 | 1.24 | 1 | 2 | 0 | 0 | 1 | 1 | 1 | 17 | 43 | 5 | 17 | 5 | 15 | 21 | 6 | 5 | 4 | 1 | 4 | 0 | 1
id: 10,802
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.PrivateLinkResourceListResult
human_written_code:
class PrivateLinkResourceListResult(_serialization.Model):
"""The response of a PrivateLinkResource list operation.
All required parameters must be populated in order to send to server.
:ivar value: The PrivateLinkResource items on this page. Required.
:vartype value: list[~azure.mgmt.mongocluster.models.PrivateLinkResource]
:ivar next_link: The link to the next page of items.
:vartype next_link: str
"""
_validation = {
"value": {"required": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[PrivateLinkResource]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: List["_models.PrivateLinkResource"], next_link: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword value: The PrivateLinkResource items on this page. Required.
:paramtype value: list[~azure.mgmt.mongocluster.models.PrivateLinkResource]
:keyword next_link: The link to the next page of items.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class_skeleton:
class PrivateLinkResourceListResult(_serialization.Model):
'''The response of a PrivateLinkResource list operation.
All required parameters must be populated in order to send to server.
:ivar value: The PrivateLinkResource items on this page. Required.
:vartype value: list[~azure.mgmt.mongocluster.models.PrivateLinkResource]
:ivar next_link: The link to the next page of items.
:vartype next_link: str
'''
def __init__(
self, *, value: List["_models.PrivateLinkResource"], next_link: Optional[str] = None, **kwargs: Any
) -> None:
'''
:keyword value: The PrivateLinkResource items on this page. Required.
:paramtype value: list[~azure.mgmt.mongocluster.models.PrivateLinkResource]
:keyword next_link: The link to the next page of items.
:paramtype next_link: str
'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 12 | 0 | 6 | 6 | 1 | 0.93 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 16 | 32 | 5 | 14 | 8 | 10 | 13 | 7 | 6 | 5 | 1 | 2 | 0 | 1
id: 10,803
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.PrivateLinkResourceProperties
human_written_code:
class PrivateLinkResourceProperties(_serialization.Model):
"""Properties of a private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:ivar required_zone_names: The private link resource private link DNS zone name.
:vartype required_zone_names: list[str]
"""
_validation = {
"group_id": {"readonly": True},
"required_members": {"readonly": True},
}
_attribute_map = {
"group_id": {"key": "groupId", "type": "str"},
"required_members": {"key": "requiredMembers", "type": "[str]"},
"required_zone_names": {"key": "requiredZoneNames", "type": "[str]"},
}
def __init__(self, *, required_zone_names: Optional[List[str]] = None, **kwargs: Any) -> None:
"""
:keyword required_zone_names: The private link resource private link DNS zone name.
:paramtype required_zone_names: list[str]
"""
super().__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = required_zone_names
class_skeleton:
class PrivateLinkResourceProperties(_serialization.Model):
'''Properties of a private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:ivar required_zone_names: The private link resource private link DNS zone name.
:vartype required_zone_names: list[str]
'''
def __init__(self, *, required_zone_names: Optional[List[str]] = None, **kwargs: Any) -> None:
'''
:keyword required_zone_names: The private link resource private link DNS zone name.
:paramtype required_zone_names: list[str]
'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 9 | 0 | 5 | 4 | 1 | 0.87 | 1 | 3 | 0 | 0 | 1 | 3 | 1 | 16 | 33 | 5 | 15 | 7 | 13 | 13 | 8 | 7 | 6 | 1 | 2 | 0 | 1
id: 10,804
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.PrivateLinkServiceConnectionState
human_written_code:
class PrivateLinkServiceConnectionState(_serialization.Model):
"""A collection of information about the state of the connection between service consumer and
provider.
:ivar status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Known values are: "Pending", "Approved", and "Rejected".
:vartype status: str or ~azure.mgmt.mongocluster.models.PrivateEndpointServiceConnectionStatus
:ivar description: The reason for approval/rejection of the connection.
:vartype description: str
:ivar actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:vartype actions_required: str
"""
_attribute_map = {
"status": {"key": "status", "type": "str"},
"description": {"key": "description", "type": "str"},
"actions_required": {"key": "actionsRequired", "type": "str"},
}
def __init__(
self,
*,
status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None,
description: Optional[str] = None,
actions_required: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword status: Indicates whether the connection has been Approved/Rejected/Removed by the
owner of the service. Known values are: "Pending", "Approved", and "Rejected".
:paramtype status: str or
~azure.mgmt.mongocluster.models.PrivateEndpointServiceConnectionStatus
:keyword description: The reason for approval/rejection of the connection.
:paramtype description: str
:keyword actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:paramtype actions_required: str
"""
super().__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
class_skeleton:
class PrivateLinkServiceConnectionState(_serialization.Model):
'''A collection of information about the state of the connection between service consumer and
provider.
:ivar status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Known values are: "Pending", "Approved", and "Rejected".
:vartype status: str or ~azure.mgmt.mongocluster.models.PrivateEndpointServiceConnectionStatus
:ivar description: The reason for approval/rejection of the connection.
:vartype description: str
:ivar actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:vartype actions_required: str
'''
def __init__(
self,
*,
status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None,
description: Optional[str] = None,
actions_required: Optional[str] = None,
**kwargs: Any
) -> None:
'''
:keyword status: Indicates whether the connection has been Approved/Rejected/Removed by the
owner of the service. Known values are: "Pending", "Approved", and "Rejected".
:paramtype status: str or
~azure.mgmt.mongocluster.models.PrivateEndpointServiceConnectionStatus
:keyword description: The reason for approval/rejection of the connection.
:paramtype description: str
:keyword actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:paramtype actions_required: str
'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 23 | 0 | 12 | 11 | 1 | 1.22 | 1 | 3 | 0 | 0 | 1 | 3 | 1 | 16 | 43 | 3 | 18 | 13 | 9 | 22 | 7 | 6 | 5 | 1 | 2 | 0 | 1
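Every field on this model is optional, and the `Union[str, Enum]` annotation on `status` means a plain string is accepted wherever the enum is expected. A construction sketch (values are hypothetical):

```python
# Illustrative sketch only; the status/description values are made up.
state = PrivateLinkServiceConnectionState(
    status="Approved",  # plain string accepted for the Union[str, ...] field
    description="Auto-approved by policy",
)
print(state.status)            # "Approved"
print(state.actions_required)  # None (left unset)
```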
id: 10,805
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.ProxyResource
human_written_code:
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have
tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
"""
class_skeleton:
class ProxyResource(Resource):
'''The resource model definition for an Azure Resource Manager proxy resource. It will not have
tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
'''
metrics (total_program_units through SumCyclomatic, in schema order): 1 | 1 | 0 | 0 | 0 | 0 | 0 | 15 | 1 | 0 | 0 | 4 | 0 | 0 | 0 | 16 | 18 | 2 | 1 | 1 | 0 | 15 | 1 | 1 | 0 | 0 | 3 | 0 | 0
id: 10,806
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.Resource
human_written_code:
class Resource(_serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class_skeleton:
class Resource(_serialization.Model):
'''Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
'''
def __init__(self, **kwargs: Any) -> None:
''' '''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 7 | 0 | 6 | 1 | 1 | 0.79 | 1 | 2 | 0 | 2 | 1 | 4 | 1 | 16 | 39 | 5 | 19 | 8 | 17 | 15 | 9 | 8 | 7 | 1 | 2 | 0 | 1
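Note the read-only pattern here: every attribute marked `"readonly": True` in `_validation` is initialized to `None` and only receives a value when a service response is deserialized. A small sketch of the caller-side view:

```python
# Illustrative sketch only. id/name/type/system_data are server-populated;
# user code never sets them directly.
res = Resource()
print(res.id, res.name, res.type)  # None None None until deserialized
```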
id: 10,807
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.SystemData
human_written_code:
class SystemData(_serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:ivar created_by: The identity that created the resource.
:vartype created_by: str
:ivar created_by_type: The type of identity that created the resource. Known values are:
"User", "Application", "ManagedIdentity", and "Key".
:vartype created_by_type: str or ~azure.mgmt.mongocluster.models.CreatedByType
:ivar created_at: The timestamp of resource creation (UTC).
:vartype created_at: ~datetime.datetime
:ivar last_modified_by: The identity that last modified the resource.
:vartype last_modified_by: str
:ivar last_modified_by_type: The type of identity that last modified the resource. Known values
are: "User", "Application", "ManagedIdentity", and "Key".
:vartype last_modified_by_type: str or ~azure.mgmt.mongocluster.models.CreatedByType
:ivar last_modified_at: The timestamp of resource last modification (UTC).
:vartype last_modified_at: ~datetime.datetime
"""
_attribute_map = {
"created_by": {"key": "createdBy", "type": "str"},
"created_by_type": {"key": "createdByType", "type": "str"},
"created_at": {"key": "createdAt", "type": "iso-8601"},
"last_modified_by": {"key": "lastModifiedBy", "type": "str"},
"last_modified_by_type": {"key": "lastModifiedByType", "type": "str"},
"last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "_models.CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "_models.CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs: Any
) -> None:
"""
:keyword created_by: The identity that created the resource.
:paramtype created_by: str
:keyword created_by_type: The type of identity that created the resource. Known values are:
"User", "Application", "ManagedIdentity", and "Key".
:paramtype created_by_type: str or ~azure.mgmt.mongocluster.models.CreatedByType
:keyword created_at: The timestamp of resource creation (UTC).
:paramtype created_at: ~datetime.datetime
:keyword last_modified_by: The identity that last modified the resource.
:paramtype last_modified_by: str
:keyword last_modified_by_type: The type of identity that last modified the resource. Known
values are: "User", "Application", "ManagedIdentity", and "Key".
:paramtype last_modified_by_type: str or ~azure.mgmt.mongocluster.models.CreatedByType
:keyword last_modified_at: The timestamp of resource last modification (UTC).
:paramtype last_modified_at: ~datetime.datetime
"""
super().__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
class_skeleton:
class SystemData(_serialization.Model):
'''Metadata pertaining to creation and last modification of the resource.
:ivar created_by: The identity that created the resource.
:vartype created_by: str
:ivar created_by_type: The type of identity that created the resource. Known values are:
"User", "Application", "ManagedIdentity", and "Key".
:vartype created_by_type: str or ~azure.mgmt.mongocluster.models.CreatedByType
:ivar created_at: The timestamp of resource creation (UTC).
:vartype created_at: ~datetime.datetime
:ivar last_modified_by: The identity that last modified the resource.
:vartype last_modified_by: str
:ivar last_modified_by_type: The type of identity that last modified the resource. Known values
are: "User", "Application", "ManagedIdentity", and "Key".
:vartype last_modified_by_type: str or ~azure.mgmt.mongocluster.models.CreatedByType
:ivar last_modified_at: The timestamp of resource last modification (UTC).
:vartype last_modified_at: ~datetime.datetime
'''
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "_models.CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "_models.CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs: Any
) -> None:
'''
:keyword created_by: The identity that created the resource.
:paramtype created_by: str
:keyword created_by_type: The type of identity that created the resource. Known values are:
"User", "Application", "ManagedIdentity", and "Key".
:paramtype created_by_type: str or ~azure.mgmt.mongocluster.models.CreatedByType
:keyword created_at: The timestamp of resource creation (UTC).
:paramtype created_at: ~datetime.datetime
:keyword last_modified_by: The identity that last modified the resource.
:paramtype last_modified_by: str
:keyword last_modified_by_type: The type of identity that last modified the resource. Known
values are: "User", "Application", "ManagedIdentity", and "Key".
:paramtype last_modified_by_type: str or ~azure.mgmt.mongocluster.models.CreatedByType
:keyword last_modified_at: The timestamp of resource last modification (UTC).
:paramtype last_modified_at: ~datetime.datetime
'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 34 | 0 | 18 | 16 | 1 | 1.19 | 1 | 4 | 0 | 0 | 1 | 6 | 1 | 16 | 62 | 3 | 27 | 19 | 15 | 32 | 10 | 9 | 8 | 1 | 2 | 0 | 1
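The `iso-8601` entries in `_attribute_map` mean the two timestamps travel as ISO 8601 strings on the wire but are surfaced as `datetime.datetime` objects in Python. A construction sketch (all values hypothetical):

```python
import datetime

# Illustrative sketch only; these values are made up.
sd = SystemData(
    created_by="alice@example.com",
    created_by_type="User",  # plain string accepted for Union[str, CreatedByType]
    created_at=datetime.datetime(2024, 5, 1, tzinfo=datetime.timezone.utc),
)
print(sd.created_at.isoformat())  # 2024-05-01T00:00:00+00:00
```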
id: 10,808
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.TrackedResource
human_written_code:
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which
has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar location: The geo-location where the resource lives. Required.
:vartype location: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"location": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"tags": {"key": "tags", "type": "{str}"},
"location": {"key": "location", "type": "str"},
}
def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword location: The geo-location where the resource lives. Required.
:paramtype location: str
"""
super().__init__(**kwargs)
self.tags = tags
self.location = location
class_skeleton:
class TrackedResource(Resource):
'''The resource model definition for an Azure Resource Manager tracked top level resource which
has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar location: The geo-location where the resource lives. Required.
:vartype location: str
'''
def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
'''
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword location: The geo-location where the resource lives. Required.
:paramtype location: str
'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 10 | 0 | 4 | 6 | 1 | 1.3 | 1 | 3 | 0 | 1 | 1 | 2 | 1 | 17 | 52 | 6 | 20 | 6 | 18 | 26 | 7 | 6 | 5 | 1 | 3 | 0 | 1
id: 10,809
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_mongo_cluster_mgmt_client_enums.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._mongo_cluster_mgmt_client_enums.CheckNameAvailabilityReason
human_written_code:
class CheckNameAvailabilityReason(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The reason why the given name is not available."""
INVALID = "Invalid"
ALREADY_EXISTS = "AlreadyExists"
class_skeleton:
class CheckNameAvailabilityReason(str, Enum, metaclass=CaseInsensitiveEnumMeta):
'''The reason why the given name is not available.'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 5 | 1 | 3 | 3 | 2 | 1 | 3 | 3 | 2 | 0 | 4 | 0 | 0
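`CaseInsensitiveEnumMeta` (from `azure.core`) makes member-name lookup on these enums case-insensitive, which lets generated clients tolerate service strings of varying casing. A quick sketch, assuming the standard `azure.core` metaclass behavior:

```python
# Illustrative sketch only; assumes azure.core.CaseInsensitiveEnumMeta's
# case-insensitive name lookup (the metaclass upper-cases the key).
reason = CheckNameAvailabilityReason["already_exists"]
assert reason is CheckNameAvailabilityReason.ALREADY_EXISTS
print(reason.value)  # "AlreadyExists"
```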
id: 10,810
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_mongo_cluster_mgmt_client_enums.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._mongo_cluster_mgmt_client_enums.CreateMode
human_written_code:
class CreateMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The mode that the Mongo Cluster is created with."""
DEFAULT = "Default"
"""Create a new mongo cluster."""
POINT_IN_TIME_RESTORE = "PointInTimeRestore"
"""Create a mongo cluster from a restore point-in-time."""
class_skeleton:
class CreateMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
'''The mode that the Mongo Cluster is created with.'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 7 | 1 | 3 | 3 | 2 | 3 | 3 | 3 | 2 | 0 | 4 | 0 | 0
id: 10,811
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_mongo_cluster_mgmt_client_enums.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._mongo_cluster_mgmt_client_enums.MongoClusterStatus
human_written_code:
class MongoClusterStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The status of the Mongo cluster resource."""
READY = "Ready"
"""The mongo cluster resource is ready for use."""
PROVISIONING = "Provisioning"
"""The mongo cluster resource is being provisioned."""
UPDATING = "Updating"
"""The mongo cluster resource is being updated."""
STARTING = "Starting"
"""The mongo cluster resource is being started."""
STOPPING = "Stopping"
"""The mongo cluster resource is being stopped."""
STOPPED = "Stopped"
"""The mongo cluster resource is stopped."""
DROPPING = "Dropping"
"""The mongo cluster resource is being dropped."""
class_skeleton:
class MongoClusterStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
'''The status of the Mongo cluster resource.'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 17 | 1 | 8 | 8 | 7 | 8 | 8 | 8 | 7 | 0 | 4 | 0 | 0
id: 10,812
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_mongo_cluster_mgmt_client_enums.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._mongo_cluster_mgmt_client_enums.NodeKind
human_written_code:
class NodeKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The kind of the node on the cluster."""
SHARD = "Shard"
"""The node is a shard kind."""
class_skeleton:
class NodeKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
'''The kind of the node on the cluster.'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 5 | 1 | 2 | 2 | 1 | 2 | 2 | 2 | 1 | 0 | 4 | 0 | 0
id: 10,813
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_mongo_cluster_mgmt_client_enums.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._mongo_cluster_mgmt_client_enums.ProvisioningState
human_written_code:
class ProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The provisioning state of the last accepted operation."""
SUCCEEDED = "Succeeded"
"""Resource has been created."""
FAILED = "Failed"
"""Resource creation failed."""
CANCELED = "Canceled"
"""Resource creation was canceled."""
IN_PROGRESS = "InProgress"
"""An operation is in-progress on the resource."""
UPDATING = "Updating"
"""An update operation is in-progress on the resource."""
DROPPING = "Dropping"
"""A drop operation is in-progress on the resource."""
class_skeleton:
class ProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
'''The provisioning state of the last accepted operation.'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 15 | 1 | 7 | 7 | 6 | 7 | 7 | 7 | 6 | 0 | 4 | 0 | 0
id: 10,814
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.OperationListResult
human_written_code:
class OperationListResult(_serialization.Model):
"""A list of REST API operations supported by an Azure Resource Provider. It contains an URL link
to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of operations supported by the resource provider.
:vartype value: list[~azure.mgmt.mongocluster.models.Operation]
:ivar next_link: URL to get the next set of operation list results (if there are any).
:vartype next_link: str
"""
_validation = {
"value": {"readonly": True},
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[Operation]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.value = None
self.next_link = None
class_skeleton:
class OperationListResult(_serialization.Model):
'''A list of REST API operations supported by an Azure Resource Provider. It contains a URL link
to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of operations supported by the resource provider.
:vartype value: list[~azure.mgmt.mongocluster.models.Operation]
:ivar next_link: URL to get the next set of operation list results (if there are any).
:vartype next_link: str
'''
def __init__(self, **kwargs: Any) -> None:
''' '''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 5 | 0 | 4 | 1 | 1 | 0.69 | 1 | 2 | 0 | 0 | 1 | 2 | 1 | 16 | 27 | 5 | 13 | 6 | 11 | 9 | 7 | 6 | 5 | 1 | 2 | 0 | 1
id: 10,815
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.Operation
human_written_code:
class Operation(_serialization.Model):
"""Details of a REST API operation, returned from the Resource Provider Operations API.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the operation, as per Resource-Based Access Control (RBAC). Examples:
"Microsoft.Compute/virtualMachines/write", "Microsoft.Compute/virtualMachines/capture/action".
:vartype name: str
:ivar is_data_action: Whether the operation applies to data-plane. This is "true" for
data-plane operations and "false" for ARM/control-plane operations.
:vartype is_data_action: bool
:ivar display: Localized display information for this particular operation.
:vartype display: ~azure.mgmt.mongocluster.models.OperationDisplay
:ivar origin: The intended executor of the operation; as in Resource Based Access Control
(RBAC) and audit logs UX. Default value is "user,system". Known values are: "user", "system",
and "user,system".
:vartype origin: str or ~azure.mgmt.mongocluster.models.Origin
:ivar action_type: Enum. Indicates the action type. "Internal" refers to actions that are for
internal only APIs. "Internal"
:vartype action_type: str or ~azure.mgmt.mongocluster.models.ActionType
"""
_validation = {
"name": {"readonly": True},
"is_data_action": {"readonly": True},
"origin": {"readonly": True},
"action_type": {"readonly": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"is_data_action": {"key": "isDataAction", "type": "bool"},
"display": {"key": "display", "type": "OperationDisplay"},
"origin": {"key": "origin", "type": "str"},
"action_type": {"key": "actionType", "type": "str"},
}
def __init__(self, *, display: Optional["_models.OperationDisplay"] = None, **kwargs: Any) -> None:
"""
:keyword display: Localized display information for this particular operation.
:paramtype display: ~azure.mgmt.mongocluster.models.OperationDisplay
"""
super().__init__(**kwargs)
self.name = None
self.is_data_action = None
self.display = display
self.origin = None
self.action_type = None
class_skeleton:
class Operation(_serialization.Model):
'''Details of a REST API operation, returned from the Resource Provider Operations API.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the operation, as per Resource-Based Access Control (RBAC). Examples:
"Microsoft.Compute/virtualMachines/write", "Microsoft.Compute/virtualMachines/capture/action".
:vartype name: str
:ivar is_data_action: Whether the operation applies to data-plane. This is "true" for
data-plane operations and "false" for ARM/control-plane operations.
:vartype is_data_action: bool
:ivar display: Localized display information for this particular operation.
:vartype display: ~azure.mgmt.mongocluster.models.OperationDisplay
:ivar origin: The intended executor of the operation; as in Resource Based Access Control
(RBAC) and audit logs UX. Default value is "user,system". Known values are: "user", "system",
and "user,system".
:vartype origin: str or ~azure.mgmt.mongocluster.models.Origin
:ivar action_type: Enum. Indicates the action type. "Internal" refers to actions that are for
internal only APIs. "Internal"
:vartype action_type: str or ~azure.mgmt.mongocluster.models.ActionType
'''
def __init__(self, *, display: Optional["_models.OperationDisplay"] = None, **kwargs: Any) -> None:
'''
:keyword display: Localized display information for this particular operation.
:paramtype display: ~azure.mgmt.mongocluster.models.OperationDisplay
'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 11 | 0 | 7 | 4 | 1 | 1.05 | 1 | 2 | 0 | 0 | 1 | 5 | 1 | 16 | 48 | 5 | 21 | 9 | 19 | 22 | 10 | 9 | 8 | 1 | 2 | 0 | 1
id: 10,816
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.NodeGroupSpec
human_written_code:
class NodeGroupSpec(_serialization.Model):
"""Specification for a node group.
:ivar sku: The resource sku for the node group. This defines the size of CPU and memory that is
provisioned for each node. Example values: 'M30', 'M40'.
:vartype sku: str
:ivar disk_size_gb: The disk storage size for the node group in GB. Example values: 128, 256,
512, 1024.
:vartype disk_size_gb: int
:ivar enable_ha: Whether high availability is enabled on the node group.
:vartype enable_ha: bool
:ivar kind: The node type deployed in the node group. "Shard"
:vartype kind: str or ~azure.mgmt.mongocluster.models.NodeKind
:ivar node_count: The number of nodes in the node group.
:vartype node_count: int
"""
_attribute_map = {
"sku": {"key": "sku", "type": "str"},
"disk_size_gb": {"key": "diskSizeGB", "type": "int"},
"enable_ha": {"key": "enableHa", "type": "bool"},
"kind": {"key": "kind", "type": "str"},
"node_count": {"key": "nodeCount", "type": "int"},
}
def __init__(
self,
*,
sku: Optional[str] = None,
disk_size_gb: Optional[int] = None,
enable_ha: Optional[bool] = None,
kind: Optional[Union[str, "_models.NodeKind"]] = None,
node_count: Optional[int] = None,
**kwargs: Any
) -> None:
"""
:keyword sku: The resource sku for the node group. This defines the size of CPU and memory that
is provisioned for each node. Example values: 'M30', 'M40'.
:paramtype sku: str
:keyword disk_size_gb: The disk storage size for the node group in GB. Example values: 128,
256, 512, 1024.
:paramtype disk_size_gb: int
:keyword enable_ha: Whether high availability is enabled on the node group.
:paramtype enable_ha: bool
:keyword kind: The node type deployed in the node group. "Shard"
:paramtype kind: str or ~azure.mgmt.mongocluster.models.NodeKind
:keyword node_count: The number of nodes in the node group.
:paramtype node_count: int
"""
super().__init__(**kwargs)
self.sku = sku
self.disk_size_gb = disk_size_gb
self.enable_ha = enable_ha
self.kind = kind
self.node_count = node_count
class_skeleton:
class NodeGroupSpec(_serialization.Model):
'''Specification for a node group.
:ivar sku: The resource sku for the node group. This defines the size of CPU and memory that is
provisioned for each node. Example values: 'M30', 'M40'.
:vartype sku: str
:ivar disk_size_gb: The disk storage size for the node group in GB. Example values: 128, 256,
512, 1024.
:vartype disk_size_gb: int
:ivar enable_ha: Whether high availability is enabled on the node group.
:vartype enable_ha: bool
:ivar kind: The node type deployed in the node group. "Shard"
:vartype kind: str or ~azure.mgmt.mongocluster.models.NodeKind
:ivar node_count: The number of nodes in the node group.
:vartype node_count: int
'''
def __init__(
self,
*,
sku: Optional[str] = None,
disk_size_gb: Optional[int] = None,
enable_ha: Optional[bool] = None,
kind: Optional[Union[str, "_models.NodeKind"]] = None,
node_count: Optional[int] = None,
**kwargs: Any
) -> None:
'''
:keyword sku: The resource sku for the node group. This defines the size of CPU and memory that
is provisioned for each node. Example values: 'M30', 'M40'.
:paramtype sku: str
:keyword disk_size_gb: The disk storage size for the node group in GB. Example values: 128,
256, 512, 1024.
:paramtype disk_size_gb: int
:keyword enable_ha: Whether high availability is enabled on the node group.
:paramtype enable_ha: bool
:keyword kind: The node type deployed in the node group. "Shard"
:paramtype kind: str or ~azure.mgmt.mongocluster.models.NodeKind
:keyword node_count: The number of nodes in the node group.
:paramtype node_count: int
'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 30 | 0 | 16 | 14 | 1 | 1.17 | 1 | 5 | 0 | 0 | 1 | 5 | 1 | 16 | 55 | 3 | 24 | 17 | 13 | 28 | 9 | 8 | 7 | 1 | 2 | 0 | 1
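Putting the docstring examples together, a shard node group could be described like this:

```python
# Illustrative sketch only, reusing the example values from the
# docstrings above.
spec = NodeGroupSpec(
    sku="M30",         # CPU/memory size per node
    disk_size_gb=128,  # storage per node, in GB
    enable_ha=True,
    kind="Shard",      # plain string accepted for Union[str, NodeKind]
    node_count=2,
)
```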
id: 10,817
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.NodeGroupProperties
human_written_code:
class NodeGroupProperties(_serialization.Model):
"""The properties of the node group on a cluster.
:ivar sku: The resource sku for the node group. This defines the size of CPU and memory that is
provisioned for each node. Example values: 'M30', 'M40'.
:vartype sku: str
:ivar disk_size_gb: The disk storage size for the node group in GB. Example values: 128, 256,
512, 1024.
:vartype disk_size_gb: int
:ivar enable_ha: Whether high availability is enabled on the node group.
:vartype enable_ha: bool
"""
_attribute_map = {
"sku": {"key": "sku", "type": "str"},
"disk_size_gb": {"key": "diskSizeGB", "type": "int"},
"enable_ha": {"key": "enableHa", "type": "bool"},
}
def __init__(
self,
*,
sku: Optional[str] = None,
disk_size_gb: Optional[int] = None,
enable_ha: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
:keyword sku: The resource sku for the node group. This defines the size of CPU and memory that
is provisioned for each node. Example values: 'M30', 'M40'.
:paramtype sku: str
:keyword disk_size_gb: The disk storage size for the node group in GB. Example values: 128,
256, 512, 1024.
:paramtype disk_size_gb: int
:keyword enable_ha: Whether high availability is enabled on the node group.
:paramtype enable_ha: bool
"""
super().__init__(**kwargs)
self.sku = sku
self.disk_size_gb = disk_size_gb
self.enable_ha = enable_ha
class_skeleton:
class NodeGroupProperties(_serialization.Model):
'''The properties of the node group on a cluster.
:ivar sku: The resource sku for the node group. This defines the size of CPU and memory that is
provisioned for each node. Example values: 'M30', 'M40'.
:vartype sku: str
:ivar disk_size_gb: The disk storage size for the node group in GB. Example values: 128, 256,
512, 1024.
:vartype disk_size_gb: int
:ivar enable_ha: Whether high availability is enabled on the node group.
:vartype enable_ha: bool
'''
def __init__(
self,
*,
sku: Optional[str] = None,
disk_size_gb: Optional[int] = None,
enable_ha: Optional[bool] = None,
**kwargs: Any
) -> None:
'''
:keyword sku: The resource sku for the node group. This defines the size of CPU and memory that
is provisioned for each node. Example values: 'M30', 'M40'.
:paramtype sku: str
:keyword disk_size_gb: The disk storage size for the node group in GB. Example values: 128,
256, 512, 1024.
:paramtype disk_size_gb: int
:keyword enable_ha: Whether high availability is enabled on the node group.
:paramtype enable_ha: bool
'''
pass
metrics (total_program_units through SumCyclomatic, in schema order): 2 | 2 | 22 | 0 | 12 | 10 | 1 | 1.11 | 1 | 5 | 0 | 0 | 1 | 3 | 1 | 16 | 41 | 3 | 18 | 13 | 9 | 20 | 7 | 6 | 5 | 1 | 2 | 0 | 1
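The final (truncated) record below is the async operations class that consumes these models; its `list_by_mongo_cluster` returns an `AsyncItemPaged` that is driven with `async for`. A consumption sketch, assuming an authenticated client (the helper function here is hypothetical):

```python
# Illustrative sketch only; assumes `client` is an authenticated
# azure.mgmt.mongocluster.aio.MongoClusterMgmtClient.
async def dump_connections(client, resource_group: str, cluster: str) -> None:
    async for conn in client.private_endpoint_connections.list_by_mongo_cluster(
        resource_group, cluster
    ):
        print(conn.name)
```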
id: 10,818
repository_name: Azure/azure-cli-extensions
file_path: src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/aio/operations/_private_endpoint_connections_operations.py
class_name: azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.aio.operations._private_endpoint_connections_operations.PrivateEndpointConnectionsOperations
human_written_code:
class PrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.mongocluster.aio.MongoClusterMgmtClient`'s
:attr:`private_endpoint_connections` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_mongo_cluster(
self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any
) -> AsyncIterable["_models.PrivateEndpointConnectionResource"]:
"""List existing private connections.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:return: An iterator like instance of either PrivateEndpointConnectionResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateEndpointConnectionResourceListResult] = kwargs.pop("cls", None)
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
_request = build_list_by_mongo_cluster_request(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
_request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_request.method = "GET"
return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnectionResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
_request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
@distributed_trace_async
async def get(
self, resource_group_name: str, mongo_cluster_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnectionResource:
"""Get a specific private connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource. Required.
:type private_endpoint_connection_name: str
:return: PrivateEndpointConnectionResource or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateEndpointConnectionResource] = kwargs.pop("cls", None)
_request = build_get_request(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnectionResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
async def _create_initial(
self,
resource_group_name: str,
mongo_cluster_name: str,
private_endpoint_connection_name: str,
resource: Union[_models.PrivateEndpointConnectionResource, IO[bytes]],
**kwargs: Any
) -> Optional[_models.PrivateEndpointConnectionResource]:
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.PrivateEndpointConnectionResource]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(resource, (IOBase, bytes)):
_content = resource
else:
_json = self._serialize.body(resource, "PrivateEndpointConnectionResource")
_request = build_create_request(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("PrivateEndpointConnectionResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("PrivateEndpointConnectionResource", pipeline_response)
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
@overload
async def begin_create(
self,
resource_group_name: str,
mongo_cluster_name: str,
private_endpoint_connection_name: str,
resource: _models.PrivateEndpointConnectionResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnectionResource]:
"""Create a Private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource. Required.
:type private_endpoint_connection_name: str
:param resource: Resource create parameters. Required.
:type resource: ~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnectionResource or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create(
self,
resource_group_name: str,
mongo_cluster_name: str,
private_endpoint_connection_name: str,
resource: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnectionResource]:
"""Create a Private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource. Required.
:type private_endpoint_connection_name: str
:param resource: Resource create parameters. Required.
:type resource: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnectionResource or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create(
self,
resource_group_name: str,
mongo_cluster_name: str,
private_endpoint_connection_name: str,
resource: Union[_models.PrivateEndpointConnectionResource, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnectionResource]:
"""Create a Private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource. Required.
:type private_endpoint_connection_name: str
:param resource: Resource create parameters. Is either a PrivateEndpointConnectionResource type
or an IO[bytes] type. Required.
:type resource: ~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource or IO[bytes]
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnectionResource or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateEndpointConnectionResource] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
private_endpoint_connection_name=private_endpoint_connection_name,
resource=resource,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnectionResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(
AsyncPollingMethod,
AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
)
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller[_models.PrivateEndpointConnectionResource].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller[_models.PrivateEndpointConnectionResource](
self._client, raw_result, get_long_running_output, polling_method # type: ignore
)
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, mongo_cluster_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> None:
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
_request = build_delete_request(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, None, response_headers) # type: ignore
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, mongo_cluster_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete the private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource. Required.
:type private_endpoint_connection_name: str
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
private_endpoint_connection_name=private_endpoint_connection_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)
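A minimal usage sketch, not part of the vendored source: driving begin_create and begin_delete from the async management client. The subscription, resource group, cluster, and connection names below are hypothetical placeholders, and the empty PrivateEndpointConnectionResource stands in for a fully populated body.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.mongocluster.aio import MongoClusterMgmtClient
from azure.mgmt.mongocluster.models import PrivateEndpointConnectionResource

async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MongoClusterMgmtClient(credential, "<subscription-id>") as client:
            # Start the create LRO and wait for the final connection resource.
            create_poller = await client.private_endpoint_connections.begin_create(
                resource_group_name="example-rg",
                mongo_cluster_name="example-cluster",
                private_endpoint_connection_name="example-connection",
                resource=PrivateEndpointConnectionResource(),  # placeholder body
            )
            connection = await create_poller.result()
            print(connection.name)
            # Tear the connection down; the delete poller resolves to None.
            delete_poller = await client.private_endpoint_connections.begin_delete(
                resource_group_name="example-rg",
                mongo_cluster_name="example-cluster",
                private_endpoint_connection_name="example-connection",
            )
            await delete_poller.result()

asyncio.run(main())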
|
class PrivateEndpointConnectionsOperations:
'''
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.mongocluster.aio.MongoClusterMgmtClient`'s
:attr:`private_endpoint_connections` attribute.
'''
def __init__(self, *args, **kwargs) -> None:
pass
@distributed_trace
def list_by_mongo_cluster(
self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any
) -> AsyncIterable["_models.PrivateEndpointConnectionResource"]:
'''List existing private connections.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:return: An iterator like instance of either PrivateEndpointConnectionResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
def prepare_request(next_link=None):
pass
async def extract_data(pipeline_response):
pass
async def get_next(next_link=None):
pass
@distributed_trace_async
async def get(
    self, resource_group_name: str, mongo_cluster_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnectionResource:
'''Get a specific private connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource. Required.
:type private_endpoint_connection_name: str
:return: PrivateEndpointConnectionResource or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
async def _create_initial(
self,
resource_group_name: str,
mongo_cluster_name: str,
private_endpoint_connection_name: str,
resource: Union[_models.PrivateEndpointConnectionResource, IO[bytes]],
**kwargs: Any
) -> Optional[_models.PrivateEndpointConnectionResource]:
pass
@overload
async def begin_create(
self,
resource_group_name: str,
mongo_cluster_name: str,
private_endpoint_connection_name: str,
resource: _models.PrivateEndpointConnectionResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnectionResource]:
'''Create a Private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource. Required.
:type private_endpoint_connection_name: str
:param resource: Resource create parameters. Required.
:type resource: ~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnectionResource or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
@overload
async def begin_create(
self,
resource_group_name: str,
mongo_cluster_name: str,
private_endpoint_connection_name: str,
resource: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnectionResource]:
'''Create a Private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource. Required.
:type private_endpoint_connection_name: str
:param resource: Resource create parameters. Required.
:type resource: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnectionResource or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
@distributed_trace_async
async def begin_create(
self,
resource_group_name: str,
mongo_cluster_name: str,
private_endpoint_connection_name: str,
resource: Union[_models.PrivateEndpointConnectionResource, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnectionResource]:
'''Create a Private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource. Required.
:type private_endpoint_connection_name: str
:param resource: Resource create parameters. Is either a PrivateEndpointConnectionResource type
or an IO[bytes] type. Required.
:type resource: ~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource or IO[bytes]
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnectionResource or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.PrivateEndpointConnectionResource]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
def get_long_running_output(pipeline_response):
pass
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, mongo_cluster_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> None:
pass
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, mongo_cluster_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
'''Delete the private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection associated
with the Azure resource. Required.
:type private_endpoint_connection_name: str
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
def get_long_running_output(pipeline_response):
pass
| 21 | 7 | 38 | 4 | 27 | 9 | 3 | 0.37 | 0 | 9 | 0 | 0 | 9 | 4 | 9 | 9 | 497 | 63 | 330 | 139 | 269 | 121 | 172 | 93 | 157 | 7 | 0 | 1 | 42 |
10,819 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/aio/operations/_private_links_operations.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.aio.operations._private_links_operations.PrivateLinksOperations
|
class PrivateLinksOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.mongocluster.aio.MongoClusterMgmtClient`'s
:attr:`private_links` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_mongo_cluster(
self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any
) -> AsyncIterable["_models.PrivateLinkResource"]:
"""list private links on the given resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:return: An iterator like instance of either PrivateLinkResource or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.mongocluster.models.PrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None)
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
_request = build_list_by_mongo_cluster_request(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
_request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_request.method = "GET"
return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
_request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
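A short consumption sketch, assuming an already-constructed async client and placeholder resource names: list_by_mongo_cluster returns an AsyncItemPaged without being awaited, and pages are fetched lazily during async iteration.
async def print_private_links(client) -> None:
    # client: an azure.mgmt.mongocluster.aio.MongoClusterMgmtClient (assumed setup)
    pager = client.private_links.list_by_mongo_cluster(
        resource_group_name="example-rg",      # placeholder
        mongo_cluster_name="example-cluster",  # placeholder
    )
    async for link in pager:  # yields PrivateLinkResource items page by page
        print(link.name)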
|
class PrivateLinksOperations:
'''
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.mongocluster.aio.MongoClusterMgmtClient`'s
:attr:`private_links` attribute.
'''
def __init__(self, *args, **kwargs) -> None:
pass
@distributed_trace
def list_by_mongo_cluster(
self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any
) -> AsyncIterable["_models.PrivateLinkResource"]:
'''list private links on the given resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:return: An iterator like instance of either PrivateLinkResource or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.mongocluster.models.PrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
def prepare_request(next_link=None):
pass
async def extract_data(pipeline_response):
pass
async def get_next(next_link=None):
pass
| 7 | 2 | 29 | 3 | 23 | 3 | 2 | 0.3 | 0 | 5 | 0 | 0 | 2 | 4 | 2 | 2 | 105 | 16 | 70 | 30 | 61 | 21 | 45 | 27 | 39 | 5 | 0 | 1 | 12 |
10,820 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.AzureResourceManagerPrivateEndpointConnection
|
class AzureResourceManagerPrivateEndpointConnection(ProxyResource): # pylint: disable=name-too-long
"""The private endpoint connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
:ivar properties: The private endpoint connection properties.
:vartype properties: ~azure.mgmt.mongocluster.models.PrivateEndpointConnectionProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"properties": {"key": "properties", "type": "PrivateEndpointConnectionProperties"},
}
def __init__(
self, *, properties: Optional["_models.PrivateEndpointConnectionProperties"] = None, **kwargs: Any
) -> None:
"""
:keyword properties: The private endpoint connection properties.
:paramtype properties: ~azure.mgmt.mongocluster.models.PrivateEndpointConnectionProperties
"""
super().__init__(**kwargs)
self.properties = properties
|
class AzureResourceManagerPrivateEndpointConnection(ProxyResource):
'''The private endpoint connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
:ivar properties: The private endpoint connection properties.
:vartype properties: ~azure.mgmt.mongocluster.models.PrivateEndpointConnectionProperties
'''
def __init__(
self, *, properties: Optional["_models.PrivateEndpointConnectionProperties"] = None, **kwargs: Any
) -> None:
'''
:keyword properties: The private endpoint connection properties.
:paramtype properties: ~azure.mgmt.mongocluster.models.PrivateEndpointConnectionProperties
'''
pass
| 2 | 2 | 9 | 0 | 5 | 4 | 1 | 1.11 | 1 | 2 | 0 | 0 | 1 | 1 | 1 | 17 | 44 | 5 | 19 | 7 | 15 | 21 | 6 | 5 | 4 | 1 | 4 | 0 | 1 |
10,821 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.CheckNameAvailabilityRequest
|
class CheckNameAvailabilityRequest(_serialization.Model):
"""The check availability request body.
:ivar name: The name of the resource for which availability needs to be checked.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(self, *, name: Optional[str] = None, type: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword name: The name of the resource for which availability needs to be checked.
:paramtype name: str
:keyword type: The resource type.
:paramtype type: str
"""
super().__init__(**kwargs)
self.name = name
self.type = type
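For illustration, a sketch of how this request model pairs with the mongo_clusters.check_name_availability operation in this package; the region and the ARM resource type string are assumptions, not taken from the source.
from azure.mgmt.mongocluster.models import CheckNameAvailabilityRequest

async def is_name_free(client, candidate: str) -> bool:
    # client: an assumed, already-constructed async MongoClusterMgmtClient
    body = CheckNameAvailabilityRequest(
        name=candidate,
        type="Microsoft.DocumentDB/mongoClusters",  # assumed ARM resource type
    )
    result = await client.mongo_clusters.check_name_availability(location="eastus", body=body)
    return bool(result.name_available)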
|
class CheckNameAvailabilityRequest(_serialization.Model):
'''The check availability request body.
:ivar name: The name of the resource for which availability needs to be checked.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
'''
def __init__(self, *, name: Optional[str] = None, type: Optional[str] = None, **kwargs: Any) -> None:
'''
:keyword name: The name of the resource for which availability needs to be checked.
:paramtype name: str
:keyword type: The resource type.
:paramtype type: str
'''
pass
| 2 | 2 | 10 | 0 | 4 | 6 | 1 | 1.33 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 16 | 24 | 3 | 9 | 5 | 7 | 12 | 6 | 5 | 4 | 1 | 2 | 0 | 1 |
10,822 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.CheckNameAvailabilityResponse
|
class CheckNameAvailabilityResponse(_serialization.Model):
"""The check availability result.
:ivar name_available: Indicates if the resource name is available.
:vartype name_available: bool
:ivar reason: The reason why the given name is not available. Known values are: "Invalid" and
"AlreadyExists".
:vartype reason: str or ~azure.mgmt.mongocluster.models.CheckNameAvailabilityReason
:ivar message: Detailed reason why the given name is available.
:vartype message: str
"""
_attribute_map = {
"name_available": {"key": "nameAvailable", "type": "bool"},
"reason": {"key": "reason", "type": "str"},
"message": {"key": "message", "type": "str"},
}
def __init__(
self,
*,
name_available: Optional[bool] = None,
reason: Optional[Union[str, "_models.CheckNameAvailabilityReason"]] = None,
message: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword name_available: Indicates if the resource name is available.
:paramtype name_available: bool
:keyword reason: The reason why the given name is not available. Known values are: "Invalid"
and "AlreadyExists".
:paramtype reason: str or ~azure.mgmt.mongocluster.models.CheckNameAvailabilityReason
:keyword message: Detailed reason why the given name is available.
:paramtype message: str
"""
super().__init__(**kwargs)
self.name_available = name_available
self.reason = reason
self.message = message
|
class CheckNameAvailabilityResponse(_serialization.Model):
'''The check availability result.
:ivar name_available: Indicates if the resource name is available.
:vartype name_available: bool
:ivar reason: The reason why the given name is not available. Known values are: "Invalid" and
"AlreadyExists".
:vartype reason: str or ~azure.mgmt.mongocluster.models.CheckNameAvailabilityReason
:ivar message: Detailed reason why the given name is available.
:vartype message: str
'''
def __init__(
self,
*,
name_available: Optional[bool] = None,
reason: Optional[Union[str, "_models.CheckNameAvailabilityReason"]] = None,
message: Optional[str] = None,
**kwargs: Any
) -> None:
'''
:keyword name_available: Indicates if the resource name is available.
:paramtype name_available: bool
:keyword reason: The reason why the given name is not available. Known values are: "Invalid"
and "AlreadyExists".
:paramtype reason: str or ~azure.mgmt.mongocluster.models.CheckNameAvailabilityReason
:keyword message: Detailed reason why the given name is available.
:paramtype message: str
'''
pass
| 2 | 2 | 21 | 0 | 12 | 9 | 1 | 1 | 1 | 4 | 0 | 0 | 1 | 3 | 1 | 16 | 39 | 3 | 18 | 13 | 9 | 18 | 7 | 6 | 5 | 1 | 2 | 0 | 1 |
10,823 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.ConnectionString
|
class ConnectionString(_serialization.Model):
"""Connection string for the mongo cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar connection_string: Value of the connection string.
:vartype connection_string: str
:ivar description: Description of the connection string.
:vartype description: str
"""
_validation = {
"connection_string": {"readonly": True},
"description": {"readonly": True},
}
_attribute_map = {
"connection_string": {"key": "connectionString", "type": "str"},
"description": {"key": "description", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.connection_string = None
self.description = None
|
class ConnectionString(_serialization.Model):
'''Connection string for the mongo cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar connection_string: Value of the connection string.
:vartype connection_string: str
:ivar description: Description of the connection string.
:vartype description: str
'''
def __init__(self, **kwargs: Any) -> None:
''' '''
pass
| 2 | 2 | 5 | 0 | 4 | 1 | 1 | 0.62 | 1 | 2 | 0 | 0 | 1 | 2 | 1 | 16 | 26 | 5 | 13 | 6 | 11 | 8 | 7 | 6 | 5 | 1 | 2 | 0 | 1 |
10,824 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.ErrorDetail
|
class ErrorDetail(_serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.mongocluster.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~azure.mgmt.mongocluster.models.ErrorAdditionalInfo]
"""
_validation = {
"code": {"readonly": True},
"message": {"readonly": True},
"target": {"readonly": True},
"details": {"readonly": True},
"additional_info": {"readonly": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
"target": {"key": "target", "type": "str"},
"details": {"key": "details", "type": "[ErrorDetail]"},
"additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
|
class ErrorDetail(_serialization.Model):
'''The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.mongocluster.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~azure.mgmt.mongocluster.models.ErrorAdditionalInfo]
'''
def __init__(self, **kwargs: Any) -> None:
''' '''
pass
| 2 | 2 | 8 | 0 | 7 | 1 | 1 | 0.64 | 1 | 2 | 0 | 0 | 1 | 5 | 1 | 16 | 41 | 5 | 22 | 9 | 20 | 14 | 10 | 9 | 8 | 1 | 2 | 0 | 1 |
10,825 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.ErrorResponse
|
class ErrorResponse(_serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed
operations. (This also follows the OData error response format.).
:ivar error: The error object.
:vartype error: ~azure.mgmt.mongocluster.models.ErrorDetail
"""
_attribute_map = {
"error": {"key": "error", "type": "ErrorDetail"},
}
def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None:
"""
:keyword error: The error object.
:paramtype error: ~azure.mgmt.mongocluster.models.ErrorDetail
"""
super().__init__(**kwargs)
self.error = error
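A brief error-handling sketch, an assumption about caller code rather than part of the source: the operations in this package raise HttpResponseError with the deserialized ErrorResponse attached as model, so the ErrorDetail fields are reachable from the exception.
from azure.core.exceptions import HttpResponseError

async def get_or_report(client) -> None:
    try:
        await client.mongo_clusters.get("example-rg", "example-cluster")  # placeholders
    except HttpResponseError as exc:
        error = exc.model.error if exc.model else None  # ErrorResponse.error -> ErrorDetail
        if error is not None:
            print(error.code, error.message)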
|
class ErrorResponse(_serialization.Model):
'''Common error response for all Azure Resource Manager APIs to return error details for failed
operations. (This also follows the OData error response format.).
:ivar error: The error object.
:vartype error: ~azure.mgmt.mongocluster.models.ErrorDetail
'''
def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None:
'''
:keyword error: The error object.
:paramtype error: ~azure.mgmt.mongocluster.models.ErrorDetail
'''
pass
| 2 | 2 | 7 | 0 | 3 | 4 | 1 | 1.29 | 1 | 2 | 0 | 0 | 1 | 1 | 1 | 16 | 19 | 3 | 7 | 4 | 5 | 9 | 5 | 4 | 3 | 1 | 2 | 0 | 1 |
10,826 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/aio/operations/_mongo_clusters_operations.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.aio.operations._mongo_clusters_operations.MongoClustersOperations
|
class MongoClustersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.mongocluster.aio.MongoClusterMgmtClient`'s
:attr:`mongo_clusters` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@overload
async def check_name_availability(
self,
location: str,
body: _models.CheckNameAvailabilityRequest,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.CheckNameAvailabilityResponse:
"""Check if mongo cluster name is available for use.
:param location: The name of the Azure region. Required.
:type location: str
:param body: The CheckAvailability request. Required.
:type body: ~azure.mgmt.mongocluster.models.CheckNameAvailabilityRequest
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:return: CheckNameAvailabilityResponse or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.CheckNameAvailabilityResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def check_name_availability(
self, location: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
) -> _models.CheckNameAvailabilityResponse:
"""Check if mongo cluster name is available for use.
:param location: The name of the Azure region. Required.
:type location: str
:param body: The CheckAvailability request. Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: CheckNameAvailabilityResponse or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.CheckNameAvailabilityResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def check_name_availability(
self, location: str, body: Union[_models.CheckNameAvailabilityRequest, IO[bytes]], **kwargs: Any
) -> _models.CheckNameAvailabilityResponse:
"""Check if mongo cluster name is available for use.
:param location: The name of the Azure region. Required.
:type location: str
:param body: The CheckAvailability request. Is either a CheckNameAvailabilityRequest type or an
IO[bytes] type. Required.
:type body: ~azure.mgmt.mongocluster.models.CheckNameAvailabilityRequest or IO[bytes]
:return: CheckNameAvailabilityResponse or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.CheckNameAvailabilityResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.CheckNameAvailabilityResponse] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IOBase, bytes)):
_content = body
else:
_json = self._serialize.body(body, "CheckNameAvailabilityRequest")
_request = build_check_name_availability_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("CheckNameAvailabilityResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.MongoCluster"]:
"""List all the mongo clusters in a given subscription.
:return: An iterator like instance of either MongoCluster or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.MongoClusterListResult] = kwargs.pop("cls", None)
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
_request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
_request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_request.method = "GET"
return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("MongoClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
_request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
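# Consumption note (not from the source): list() returns AsyncItemPaged without being
# awaited, so callers iterate with `async for cluster in client.mongo_clusters.list():`;
# each HTTP page is fetched lazily through the get_next/extract_data pair above.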
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.MongoCluster"]:
"""List all the mongo clusters in a given resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:return: An iterator like instance of either MongoCluster or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.MongoClusterListResult] = kwargs.pop("cls", None)
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
_request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
_request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_request.method = "GET"
return _request
async def extract_data(pipeline_response):
deserialized = self._deserialize("MongoClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
_request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
@distributed_trace_async
async def get(self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any) -> _models.MongoCluster:
"""Gets information about a mongo cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:return: MongoCluster or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.MongoCluster
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.MongoCluster] = kwargs.pop("cls", None)
_request = build_get_request(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("MongoCluster", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
mongo_cluster_name: str,
resource: Union[_models.MongoCluster, IO[bytes]],
**kwargs: Any
) -> _models.MongoCluster:
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.MongoCluster] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(resource, (IOBase, bytes)):
_content = resource
else:
_json = self._serialize.body(resource, "MongoCluster")
_request = build_create_or_update_request(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("MongoCluster", pipeline_response)
if response.status_code == 201:
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
deserialized = self._deserialize("MongoCluster", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
resource: _models.MongoCluster,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
"""Create or update a mongo cluster. Update overwrites all properties for the resource. To only
modify some of the properties, use PATCH.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param resource: Resource create parameters. Required.
:type resource: ~azure.mgmt.mongocluster.models.MongoCluster
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
resource: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
"""Create or update a mongo cluster. Update overwrites all properties for the resource. To only
modify some of the properties, use PATCH.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param resource: Resource create parameters. Required.
:type resource: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
resource: Union[_models.MongoCluster, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
"""Create or update a mongo cluster. Update overwrites all properties for the resource. To only
modify some of the properties, use PATCH.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param resource: Resource create parameters. Is either a MongoCluster type or an IO[bytes] type.
Required.
:type resource: ~azure.mgmt.mongocluster.models.MongoCluster or IO[bytes]
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.MongoCluster] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
resource=resource,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("MongoCluster", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(
AsyncPollingMethod,
AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
)
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller[_models.MongoCluster].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller[_models.MongoCluster](
self._client, raw_result, get_long_running_output, polling_method # type: ignore
)
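# Usage note (assumed call pattern, placeholder names):
#   poller = await client.mongo_clusters.begin_create_or_update(
#       "example-rg", "example-cluster", resource)
#   cluster = await poller.result()  # waits for the ARM LRO to reach a terminal state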
async def _update_initial(
self,
resource_group_name: str,
mongo_cluster_name: str,
properties: Union[_models.MongoClusterUpdate, IO[bytes]],
**kwargs: Any
) -> Optional[_models.MongoCluster]:
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.MongoCluster]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(properties, (IOBase, bytes)):
_content = properties
else:
_json = self._serialize.body(properties, "MongoClusterUpdate")
_request = build_update_request(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("MongoCluster", pipeline_response)
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
@overload
async def begin_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
properties: _models.MongoClusterUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
"""Updates an existing mongo cluster. The request body can contain one to many of the properties
present in the normal mongo cluster definition.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param properties: The resource properties to be updated. Required.
:type properties: ~azure.mgmt.mongocluster.models.MongoClusterUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
properties: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
"""Updates an existing mongo cluster. The request body can contain one to many of the properties
present in the normal mongo cluster definition.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param properties: The resource properties to be updated. Required.
:type properties: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
properties: Union[_models.MongoClusterUpdate, IO[bytes]],
**kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
"""Updates an existing mongo cluster. The request body can contain one to many of the properties
present in the normal mongo cluster definition.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
        :param properties: The resource properties to be updated. Is either a MongoClusterUpdate type
        or an IO[bytes] type. Required.
:type properties: ~azure.mgmt.mongocluster.models.MongoClusterUpdate or IO[bytes]
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.MongoCluster] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
properties=properties,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("MongoCluster", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller[_models.MongoCluster].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller[_models.MongoCluster](
self._client, raw_result, get_long_running_output, polling_method # type: ignore
)
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any
) -> None:
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
_request = build_delete_request(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, None, response_headers) # type: ignore
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a mongo cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {}) # type: ignore
if polling is True:
polling_method: AsyncPollingMethod = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller[None].from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
@distributed_trace_async
async def list_connection_strings(
self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any
) -> _models.ListConnectionStringsResult:
"""List mongo cluster connection strings. This includes the default connection string using
SCRAM-SHA-256, as well as other connection strings supported by the cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:return: ListConnectionStringsResult or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.ListConnectionStringsResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map: MutableMapping[int, Type[HttpResponseError]] = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListConnectionStringsResult] = kwargs.pop("cls", None)
_request = build_list_connection_strings_request(
resource_group_name=resource_group_name,
mongo_cluster_name=mongo_cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
headers=_headers,
params=_params,
)
_request = _convert_request(_request)
_request.url = self._client.format_url(_request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
_request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ListConnectionStringsResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized
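# --- Illustrative usage sketch (not part of the vendored SDK source) ---
# A minimal end-to-end flow through the async operations above, driven via the
# MongoClusterMgmtClient documented on this class. The subscription id,
# resource group, cluster name, and the credential type
# (azure.identity.aio.DefaultAzureCredential) are assumptions/placeholders.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.mongocluster import models
from azure.mgmt.mongocluster.aio import MongoClusterMgmtClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        async with MongoClusterMgmtClient(credential, "<subscription-id>") as client:
            # PATCH a subset of properties through the long-running update.
            poller = await client.mongo_clusters.begin_update(
                resource_group_name="my-rg",
                mongo_cluster_name="my-cluster",
                properties=models.MongoClusterUpdate(tags={"env": "dev"}),
            )
            updated = await poller.result()
            print(updated.name)

            # Retrieve the SCRAM-SHA-256 and other supported connection strings.
            strings = await client.mongo_clusters.list_connection_strings(
                resource_group_name="my-rg",
                mongo_cluster_name="my-cluster",
            )
            print(strings.connection_strings)

            # Tear the cluster down via the delete LRO and wait for completion.
            delete_poller = await client.mongo_clusters.begin_delete(
                resource_group_name="my-rg",
                mongo_cluster_name="my-cluster",
            )
            await delete_poller.result()


asyncio.run(main())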
|
class MongoClustersOperations:
'''
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.mongocluster.aio.MongoClusterMgmtClient`'s
:attr:`mongo_clusters` attribute.
'''
def __init__(self, *args, **kwargs) -> None:
pass
@overload
async def check_name_availability(
self,
location: str,
body: _models.CheckNameAvailabilityRequest,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.CheckNameAvailabilityResponse:
'''Check if mongo cluster name is available for use.
:param location: The name of the Azure region. Required.
:type location: str
:param body: The CheckAvailability request. Required.
:type body: ~azure.mgmt.mongocluster.models.CheckNameAvailabilityRequest
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:return: CheckNameAvailabilityResponse or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.CheckNameAvailabilityResponse
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
@overload
async def check_name_availability(
self,
location: str,
        body: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.CheckNameAvailabilityResponse:
'''Check if mongo cluster name is available for use.
:param location: The name of the Azure region. Required.
:type location: str
:param body: The CheckAvailability request. Required.
:type body: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: CheckNameAvailabilityResponse or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.CheckNameAvailabilityResponse
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
@distributed_trace_async
async def check_name_availability(
self,
location: str,
        body: Union[_models.CheckNameAvailabilityRequest, IO[bytes]],
        **kwargs: Any
) -> _models.CheckNameAvailabilityResponse:
'''Check if mongo cluster name is available for use.
:param location: The name of the Azure region. Required.
:type location: str
        :param body: The CheckAvailability request. Is either a CheckNameAvailabilityRequest type
         or an IO[bytes] type. Required.
:type body: ~azure.mgmt.mongocluster.models.CheckNameAvailabilityRequest or IO[bytes]
:return: CheckNameAvailabilityResponse or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.CheckNameAvailabilityResponse
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.MongoCluster"]:
'''List all the mongo clusters in a given subscription.
        :return: An iterator-like instance of either MongoCluster or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
def prepare_request(next_link=None):
pass
async def extract_data(pipeline_response):
pass
async def get_next(next_link=None):
pass
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.MongoCluster"]:
'''List all the mongo clusters in a given resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
        :return: An iterator-like instance of either MongoCluster or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
def prepare_request(next_link=None):
pass
async def extract_data(pipeline_response):
pass
async def get_next(next_link=None):
pass
@distributed_trace_async
    async def get(self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any) -> _models.MongoCluster:
'''Gets information about a mongo cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:return: MongoCluster or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.MongoCluster
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
async def _create_or_update_initial(
self,
resource_group_name: str,
mongo_cluster_name: str,
resource: Union[_models.MongoCluster, IO[bytes]],
**kwargs: Any
) -> _models.MongoCluster:
pass
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
resource: _models.MongoCluster,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
'''Create or update a mongo cluster. Update overwrites all properties for the resource. To only
modify some of the properties, use PATCH.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param resource: Resource create parameters. Required.
:type resource: ~azure.mgmt.mongocluster.models.MongoCluster
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
        resource: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
'''Create or update a mongo cluster. Update overwrites all properties for the resource. To only
modify some of the properties, use PATCH.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param resource: Resource create parameters. Required.
:type resource: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
        resource: Union[_models.MongoCluster, IO[bytes]],
        **kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
'''Create or update a mongo cluster. Update overwrites all properties for the resource. To only
modify some of the properties, use PATCH.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
        :param resource: Resource create parameters. Is either a MongoCluster type or an IO[bytes]
        type. Required.
:type resource: ~azure.mgmt.mongocluster.models.MongoCluster or IO[bytes]
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
def get_long_running_output(pipeline_response):
pass
async def _update_initial(
self,
resource_group_name: str,
mongo_cluster_name: str,
properties: Union[_models.MongoClusterUpdate, IO[bytes]],
**kwargs: Any
) -> Optional[_models.MongoCluster]:
pass
@overload
async def begin_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
properties: _models.MongoClusterUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
        '''Updates an existing mongo cluster. The request body can contain one or more of the properties
        present in the normal mongo cluster definition.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param properties: The resource properties to be updated. Required.
:type properties: ~azure.mgmt.mongocluster.models.MongoClusterUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
@overload
async def begin_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
        properties: IO[bytes],
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
        '''Updates an existing mongo cluster. The request body can contain one or more of the properties
        present in the normal mongo cluster definition.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:param properties: The resource properties to be updated. Required.
:type properties: IO[bytes]
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
mongo_cluster_name: str,
        properties: Union[_models.MongoClusterUpdate, IO[bytes]],
        **kwargs: Any
) -> AsyncLROPoller[_models.MongoCluster]:
        '''Updates an existing mongo cluster. The request body can contain one or more of the properties
        present in the normal mongo cluster definition.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
        :param properties: The resource properties to be updated. Is either a MongoClusterUpdate type
        or an IO[bytes] type. Required.
:type properties: ~azure.mgmt.mongocluster.models.MongoClusterUpdate or IO[bytes]
:return: An instance of AsyncLROPoller that returns either MongoCluster or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.mongocluster.models.MongoCluster]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
def get_long_running_output(pipeline_response):
pass
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any
) -> None:
pass
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
'''Deletes a mongo cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
def get_long_running_output(pipeline_response):
pass
@distributed_trace_async
async def list_connection_strings(
self, resource_group_name: str, mongo_cluster_name: str, **kwargs: Any
) -> _models.ListConnectionStringsResult:
'''List mongo cluster connection strings. This includes the default connection string using
SCRAM-SHA-256, as well as other connection strings supported by the cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param mongo_cluster_name: The name of the mongo cluster. Required.
:type mongo_cluster_name: str
:return: ListConnectionStringsResult or the result of cls(response)
:rtype: ~azure.mgmt.mongocluster.models.ListConnectionStringsResult
:raises ~azure.core.exceptions.HttpResponseError:
'''
pass
| 42 | 15 | 36 | 4 | 25 | 8 | 3 | 0.36 | 0 | 9 | 0 | 0 | 18 | 4 | 18 | 18 | 909 | 124 | 600 | 259 | 485 | 215 | 317 | 172 | 289 | 6 | 0 | 1 | 72 |
10,827 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.FirewallRule
|
class FirewallRule(ProxyResource):
"""Represents a mongo cluster firewall rule.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
:ivar properties: The resource-specific properties for this resource.
:vartype properties: ~azure.mgmt.mongocluster.models.FirewallRuleProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"properties": {"key": "properties", "type": "FirewallRuleProperties"},
}
def __init__(self, *, properties: Optional["_models.FirewallRuleProperties"] = None, **kwargs: Any) -> None:
"""
:keyword properties: The resource-specific properties for this resource.
:paramtype properties: ~azure.mgmt.mongocluster.models.FirewallRuleProperties
"""
super().__init__(**kwargs)
self.properties = properties
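# --- Illustrative usage sketch (not part of the vendored SDK source) ---
# Building a firewall rule payload from the FirewallRule model above together
# with the FirewallRuleProperties model referenced in its docstring; the IP
# range is a placeholder. id/name/type/system_data are read-only and are
# populated by the server.
from azure.mgmt.mongocluster import models

rule = models.FirewallRule(
    properties=models.FirewallRuleProperties(
        start_ip_address="10.0.0.1",
        end_ip_address="10.0.0.255",
    )
)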
|
class FirewallRule(ProxyResource):
'''Represents a mongo cluster firewall rule.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
:ivar properties: The resource-specific properties for this resource.
:vartype properties: ~azure.mgmt.mongocluster.models.FirewallRuleProperties
'''
def __init__(self, *, properties: Optional["_models.FirewallRuleProperties"] = None, **kwargs: Any) -> None:
'''
:keyword properties: The resource-specific properties for this resource.
:paramtype properties: ~azure.mgmt.mongocluster.models.FirewallRuleProperties
'''
pass
| 2 | 2 | 7 | 0 | 3 | 4 | 1 | 1.18 | 1 | 2 | 0 | 0 | 1 | 1 | 1 | 17 | 42 | 5 | 17 | 5 | 15 | 20 | 6 | 5 | 4 | 1 | 4 | 0 | 1 |
10,828 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.FirewallRuleProperties
|
class FirewallRuleProperties(_serialization.Model):
"""The properties of a mongo cluster firewall rule.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to server.
:ivar provisioning_state: The provisioning state of the firewall rule. Known values are:
"Succeeded", "Failed", "Canceled", "InProgress", "Updating", and "Dropping".
:vartype provisioning_state: str or ~azure.mgmt.mongocluster.models.ProvisioningState
:ivar start_ip_address: The start IP address of the mongo cluster firewall rule. Must be IPv4
format. Required.
:vartype start_ip_address: str
:ivar end_ip_address: The end IP address of the mongo cluster firewall rule. Must be IPv4
format. Required.
:vartype end_ip_address: str
"""
_validation = {
"provisioning_state": {"readonly": True},
"start_ip_address": {
"required": True,
"pattern": r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$",
},
"end_ip_address": {
"required": True,
"pattern": r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$",
},
}
_attribute_map = {
"provisioning_state": {"key": "provisioningState", "type": "str"},
"start_ip_address": {"key": "startIpAddress", "type": "str"},
"end_ip_address": {"key": "endIpAddress", "type": "str"},
}
def __init__(self, *, start_ip_address: str, end_ip_address: str, **kwargs: Any) -> None:
"""
:keyword start_ip_address: The start IP address of the mongo cluster firewall rule. Must be
IPv4 format. Required.
:paramtype start_ip_address: str
:keyword end_ip_address: The end IP address of the mongo cluster firewall rule. Must be IPv4
format. Required.
:paramtype end_ip_address: str
"""
super().__init__(**kwargs)
self.provisioning_state = None
self.start_ip_address = start_ip_address
self.end_ip_address = end_ip_address
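# --- Illustrative note (not part of the vendored SDK source) ---
# The _validation patterns above restrict both addresses to dotted-quad IPv4
# with each octet in 0-255. The same regex can be exercised locally:
import re

_IPV4 = (
    r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}"
    r"([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
)
assert re.match(_IPV4, "192.168.0.10") is not None
assert re.match(_IPV4, "256.1.1.1") is None  # first octet out of range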
|
class FirewallRuleProperties(_serialization.Model):
'''The properties of a mongo cluster firewall rule.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to server.
:ivar provisioning_state: The provisioning state of the firewall rule. Known values are:
"Succeeded", "Failed", "Canceled", "InProgress", "Updating", and "Dropping".
:vartype provisioning_state: str or ~azure.mgmt.mongocluster.models.ProvisioningState
:ivar start_ip_address: The start IP address of the mongo cluster firewall rule. Must be IPv4
format. Required.
:vartype start_ip_address: str
:ivar end_ip_address: The end IP address of the mongo cluster firewall rule. Must be IPv4
format. Required.
:vartype end_ip_address: str
'''
def __init__(self, *, start_ip_address: str, end_ip_address: str, **kwargs: Any) -> None:
'''
:keyword start_ip_address: The start IP address of the mongo cluster firewall rule. Must be
IPv4 format. Required.
:paramtype start_ip_address: str
:keyword end_ip_address: The end IP address of the mongo cluster firewall rule. Must be IPv4
format. Required.
:paramtype end_ip_address: str
'''
pass
| 2 | 2 | 13 | 0 | 5 | 8 | 1 | 0.95 | 1 | 3 | 0 | 0 | 1 | 3 | 1 | 16 | 49 | 6 | 22 | 7 | 20 | 21 | 8 | 7 | 6 | 1 | 2 | 0 | 1 |
10,829 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.ListConnectionStringsResult
|
class ListConnectionStringsResult(_serialization.Model):
"""The connection strings for the given mongo cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar connection_strings: An array that contains the connection strings for a mongo cluster.
:vartype connection_strings: list[~azure.mgmt.mongocluster.models.ConnectionString]
"""
_validation = {
"connection_strings": {"readonly": True},
}
_attribute_map = {
"connection_strings": {"key": "connectionStrings", "type": "[ConnectionString]"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.connection_strings = None
|
class ListConnectionStringsResult(_serialization.Model):
'''The connection strings for the given mongo cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar connection_strings: An array that contains the connection strings for a mongo cluster.
:vartype connection_strings: list[~azure.mgmt.mongocluster.models.ConnectionString]
'''
def __init__(self, **kwargs: Any) -> None:
''' '''
pass
| 2 | 2 | 4 | 0 | 3 | 1 | 1 | 0.6 | 1 | 2 | 0 | 0 | 1 | 1 | 1 | 16 | 21 | 5 | 10 | 5 | 8 | 6 | 6 | 5 | 4 | 1 | 2 | 0 | 1 |
10,830 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.MongoCluster
|
class MongoCluster(TrackedResource):
"""Represents a mongo cluster resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar location: The geo-location where the resource lives. Required.
:vartype location: str
:ivar properties: The resource-specific properties for this resource.
:vartype properties: ~azure.mgmt.mongocluster.models.MongoClusterProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"system_data": {"readonly": True},
"location": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"system_data": {"key": "systemData", "type": "SystemData"},
"tags": {"key": "tags", "type": "{str}"},
"location": {"key": "location", "type": "str"},
"properties": {"key": "properties", "type": "MongoClusterProperties"},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
properties: Optional["_models.MongoClusterProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword location: The geo-location where the resource lives. Required.
:paramtype location: str
:keyword properties: The resource-specific properties for this resource.
:paramtype properties: ~azure.mgmt.mongocluster.models.MongoClusterProperties
"""
super().__init__(tags=tags, location=location, **kwargs)
self.properties = properties
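# --- Illustrative usage sketch (not part of the vendored SDK source) ---
# A minimal create payload suitable for begin_create_or_update on the
# operations class; the location, credentials, and server version below are
# placeholders.
from azure.mgmt.mongocluster import models

cluster = models.MongoCluster(
    location="eastus",
    tags={"team": "data"},
    properties=models.MongoClusterProperties(
        administrator_login="clusteradmin",
        administrator_login_password="<password>",
        server_version="5.0",
    ),
)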
|
class MongoCluster(TrackedResource):
'''Represents a mongo cluster resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to server.
:ivar id: Fully qualified resource ID for the resource. E.g.
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}". # pylint: disable=line-too-long
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.mongocluster.models.SystemData
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar location: The geo-location where the resource lives. Required.
:vartype location: str
:ivar properties: The resource-specific properties for this resource.
:vartype properties: ~azure.mgmt.mongocluster.models.MongoClusterProperties
'''
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
properties: Optional["_models.MongoClusterProperties"] = None,
**kwargs: Any
) -> None:
'''
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword location: The geo-location where the resource lives. Required.
:paramtype location: str
:keyword properties: The resource-specific properties for this resource.
:paramtype properties: ~azure.mgmt.mongocluster.models.MongoClusterProperties
'''
pass
| 2 | 2 | 18 | 0 | 10 | 8 | 1 | 1.07 | 1 | 3 | 0 | 0 | 1 | 1 | 1 | 18 | 62 | 6 | 27 | 12 | 18 | 29 | 6 | 5 | 4 | 1 | 4 | 0 | 1 |
10,831 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.MongoClusterListResult
|
class MongoClusterListResult(_serialization.Model):
"""The response of a MongoCluster list operation.
All required parameters must be populated in order to send to server.
:ivar value: The MongoCluster items on this page. Required.
:vartype value: list[~azure.mgmt.mongocluster.models.MongoCluster]
:ivar next_link: The link to the next page of items.
:vartype next_link: str
"""
_validation = {
"value": {"required": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[MongoCluster]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: List["_models.MongoCluster"], next_link: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword value: The MongoCluster items on this page. Required.
:paramtype value: list[~azure.mgmt.mongocluster.models.MongoCluster]
:keyword next_link: The link to the next page of items.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
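# --- Illustrative usage sketch (not part of the vendored SDK source) ---
# Callers rarely construct this model directly: the list()/list_by_resource_group()
# operations wrap it in an AsyncItemPaged that follows next_link automatically.
# A consumption sketch, given a client like the one sketched earlier:
async def print_cluster_names(client) -> None:
    async for cluster in client.mongo_clusters.list_by_resource_group("my-rg"):
        print(cluster.name)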
|
class MongoClusterListResult(_serialization.Model):
'''The response of a MongoCluster list operation.
All required parameters must be populated in order to send to server.
:ivar value: The MongoCluster items on this page. Required.
:vartype value: list[~azure.mgmt.mongocluster.models.MongoCluster]
:ivar next_link: The link to the next page of items.
:vartype next_link: str
'''
def __init__(self, *, value: List["_models.MongoCluster"], next_link: Optional[str] = None, **kwargs: Any) -> None:
'''
:keyword value: The MongoCluster items on this page. Required.
:paramtype value: list[~azure.mgmt.mongocluster.models.MongoCluster]
:keyword next_link: The link to the next page of items.
:paramtype next_link: str
'''
pass
| 2 | 2 | 10 | 0 | 4 | 6 | 1 | 1.08 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 16 | 30 | 5 | 12 | 6 | 10 | 13 | 7 | 6 | 5 | 1 | 2 | 0 | 1 |
10,832 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.MongoClusterProperties
|
class MongoClusterProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""The properties of a mongo cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar create_mode: The mode to create a mongo cluster. Known values are: "Default" and
"PointInTimeRestore".
:vartype create_mode: str or ~azure.mgmt.mongocluster.models.CreateMode
:ivar restore_parameters: The parameters to create a point-in-time restore mongo cluster.
:vartype restore_parameters: ~azure.mgmt.mongocluster.models.MongoClusterRestoreParameters
:ivar administrator_login: The administrator's login for the mongo cluster.
:vartype administrator_login: str
:ivar administrator_login_password: The password of the administrator login.
:vartype administrator_login_password: str
:ivar server_version: The Mongo DB server version. Defaults to the latest available version if
not specified.
:vartype server_version: str
:ivar connection_string: The default mongo connection string for the cluster.
:vartype connection_string: str
:ivar earliest_restore_time: Earliest restore timestamp in UTC ISO8601 format.
:vartype earliest_restore_time: str
:ivar provisioning_state: The provisioning state of the mongo cluster. Known values are:
"Succeeded", "Failed", "Canceled", "InProgress", "Updating", and "Dropping".
:vartype provisioning_state: str or ~azure.mgmt.mongocluster.models.ProvisioningState
:ivar cluster_status: The status of the mongo cluster. Known values are: "Ready",
"Provisioning", "Updating", "Starting", "Stopping", "Stopped", and "Dropping".
:vartype cluster_status: str or ~azure.mgmt.mongocluster.models.MongoClusterStatus
:ivar public_network_access: Whether or not public endpoint access is allowed for this mongo
cluster. Known values are: "Enabled" and "Disabled".
:vartype public_network_access: str or ~azure.mgmt.mongocluster.models.PublicNetworkAccess
:ivar node_group_specs: The list of node group specs in the cluster.
:vartype node_group_specs: list[~azure.mgmt.mongocluster.models.NodeGroupSpec]
:ivar private_endpoint_connections: List of private endpoint connections.
:vartype private_endpoint_connections:
list[~azure.mgmt.mongocluster.models.AzureResourceManagerPrivateEndpointConnection]
"""
_validation = {
"connection_string": {"readonly": True},
"earliest_restore_time": {"readonly": True},
"provisioning_state": {"readonly": True},
"cluster_status": {"readonly": True},
"private_endpoint_connections": {"readonly": True},
}
_attribute_map = {
"create_mode": {"key": "createMode", "type": "str"},
"restore_parameters": {"key": "restoreParameters", "type": "MongoClusterRestoreParameters"},
"administrator_login": {"key": "administratorLogin", "type": "str"},
"administrator_login_password": {"key": "administratorLoginPassword", "type": "str"},
"server_version": {"key": "serverVersion", "type": "str"},
"connection_string": {"key": "connectionString", "type": "str"},
"earliest_restore_time": {"key": "earliestRestoreTime", "type": "str"},
"provisioning_state": {"key": "provisioningState", "type": "str"},
"cluster_status": {"key": "clusterStatus", "type": "str"},
"public_network_access": {"key": "publicNetworkAccess", "type": "str"},
"node_group_specs": {"key": "nodeGroupSpecs", "type": "[NodeGroupSpec]"},
"private_endpoint_connections": {
"key": "privateEndpointConnections",
"type": "[AzureResourceManagerPrivateEndpointConnection]",
},
}
def __init__(
self,
*,
create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
restore_parameters: Optional["_models.MongoClusterRestoreParameters"] = None,
administrator_login: Optional[str] = None,
administrator_login_password: Optional[str] = None,
server_version: Optional[str] = None,
public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
node_group_specs: Optional[List["_models.NodeGroupSpec"]] = None,
**kwargs: Any
) -> None:
"""
:keyword create_mode: The mode to create a mongo cluster. Known values are: "Default" and
"PointInTimeRestore".
:paramtype create_mode: str or ~azure.mgmt.mongocluster.models.CreateMode
:keyword restore_parameters: The parameters to create a point-in-time restore mongo cluster.
:paramtype restore_parameters: ~azure.mgmt.mongocluster.models.MongoClusterRestoreParameters
:keyword administrator_login: The administrator's login for the mongo cluster.
:paramtype administrator_login: str
:keyword administrator_login_password: The password of the administrator login.
:paramtype administrator_login_password: str
:keyword server_version: The Mongo DB server version. Defaults to the latest available version
if not specified.
:paramtype server_version: str
:keyword public_network_access: Whether or not public endpoint access is allowed for this mongo
cluster. Known values are: "Enabled" and "Disabled".
:paramtype public_network_access: str or ~azure.mgmt.mongocluster.models.PublicNetworkAccess
:keyword node_group_specs: The list of node group specs in the cluster.
:paramtype node_group_specs: list[~azure.mgmt.mongocluster.models.NodeGroupSpec]
"""
super().__init__(**kwargs)
self.create_mode = create_mode
self.restore_parameters = restore_parameters
self.administrator_login = administrator_login
self.administrator_login_password = administrator_login_password
self.server_version = server_version
self.connection_string = None
self.earliest_restore_time = None
self.provisioning_state = None
self.cluster_status = None
self.public_network_access = public_network_access
self.node_group_specs = node_group_specs
self.private_endpoint_connections = None
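# --- Illustrative usage sketch (not part of the vendored SDK source) ---
# A point-in-time restore payload combining the "PointInTimeRestore" create
# mode documented above with the MongoClusterRestoreParameters model defined
# below; the timestamp and source resource id are placeholders.
import datetime

from azure.mgmt.mongocluster import models

restore_props = models.MongoClusterProperties(
    create_mode="PointInTimeRestore",
    restore_parameters=models.MongoClusterRestoreParameters(
        point_in_time_utc=datetime.datetime(2024, 5, 1, tzinfo=datetime.timezone.utc),
        source_resource_id="<source-cluster-resource-id>",
    ),
)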
|
class MongoClusterProperties(_serialization.Model):
'''The properties of a mongo cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar create_mode: The mode to create a mongo cluster. Known values are: "Default" and
"PointInTimeRestore".
:vartype create_mode: str or ~azure.mgmt.mongocluster.models.CreateMode
:ivar restore_parameters: The parameters to create a point-in-time restore mongo cluster.
:vartype restore_parameters: ~azure.mgmt.mongocluster.models.MongoClusterRestoreParameters
:ivar administrator_login: The administrator's login for the mongo cluster.
:vartype administrator_login: str
:ivar administrator_login_password: The password of the administrator login.
:vartype administrator_login_password: str
:ivar server_version: The Mongo DB server version. Defaults to the latest available version if
not specified.
:vartype server_version: str
:ivar connection_string: The default mongo connection string for the cluster.
:vartype connection_string: str
:ivar earliest_restore_time: Earliest restore timestamp in UTC ISO8601 format.
:vartype earliest_restore_time: str
:ivar provisioning_state: The provisioning state of the mongo cluster. Known values are:
"Succeeded", "Failed", "Canceled", "InProgress", "Updating", and "Dropping".
:vartype provisioning_state: str or ~azure.mgmt.mongocluster.models.ProvisioningState
:ivar cluster_status: The status of the mongo cluster. Known values are: "Ready",
"Provisioning", "Updating", "Starting", "Stopping", "Stopped", and "Dropping".
:vartype cluster_status: str or ~azure.mgmt.mongocluster.models.MongoClusterStatus
:ivar public_network_access: Whether or not public endpoint access is allowed for this mongo
cluster. Known values are: "Enabled" and "Disabled".
:vartype public_network_access: str or ~azure.mgmt.mongocluster.models.PublicNetworkAccess
:ivar node_group_specs: The list of node group specs in the cluster.
:vartype node_group_specs: list[~azure.mgmt.mongocluster.models.NodeGroupSpec]
:ivar private_endpoint_connections: List of private endpoint connections.
:vartype private_endpoint_connections:
list[~azure.mgmt.mongocluster.models.AzureResourceManagerPrivateEndpointConnection]
'''
def __init__(
self,
*,
create_mode: Optional[Union[str, "_models.CreateMode"]] = None,
restore_parameters: Optional["_models.MongoClusterRestoreParameters"] = None,
administrator_login: Optional[str] = None,
administrator_login_password: Optional[str] = None,
server_version: Optional[str] = None,
public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
node_group_specs: Optional[List["_models.NodeGroupSpec"]] = None,
**kwargs: Any
) -> None:
'''
:keyword create_mode: The mode to create a mongo cluster. Known values are: "Default" and
"PointInTimeRestore".
:paramtype create_mode: str or ~azure.mgmt.mongocluster.models.CreateMode
:keyword restore_parameters: The parameters to create a point-in-time restore mongo cluster.
:paramtype restore_parameters: ~azure.mgmt.mongocluster.models.MongoClusterRestoreParameters
:keyword administrator_login: The administrator's login for the mongo cluster.
:paramtype administrator_login: str
:keyword administrator_login_password: The password of the administrator login.
:paramtype administrator_login_password: str
:keyword server_version: The Mongo DB server version. Defaults to the latest available version
if not specified.
:paramtype server_version: str
:keyword public_network_access: Whether or not public endpoint access is allowed for this mongo
cluster. Known values are: "Enabled" and "Disabled".
:paramtype public_network_access: str or ~azure.mgmt.mongocluster.models.PublicNetworkAccess
:keyword node_group_specs: The list of node group specs in the cluster.
:paramtype node_group_specs: list[~azure.mgmt.mongocluster.models.NodeGroupSpec]
'''
pass
| 2 | 2 | 44 | 0 | 25 | 19 | 1 | 1.06 | 1 | 3 | 0 | 0 | 1 | 12 | 1 | 16 | 107 | 5 | 50 | 27 | 37 | 53 | 17 | 16 | 15 | 1 | 2 | 0 | 1 |
10,833 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.MongoClusterRestoreParameters
|
class MongoClusterRestoreParameters(_serialization.Model):
"""Parameters used for restore operations.
:ivar point_in_time_utc: UTC point in time to restore a mongo cluster.
:vartype point_in_time_utc: ~datetime.datetime
:ivar source_resource_id: Resource ID to locate the source cluster to restore.
:vartype source_resource_id: str
"""
_attribute_map = {
"point_in_time_utc": {"key": "pointInTimeUTC", "type": "iso-8601"},
"source_resource_id": {"key": "sourceResourceId", "type": "str"},
}
def __init__(
self,
*,
point_in_time_utc: Optional[datetime.datetime] = None,
source_resource_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword point_in_time_utc: UTC point in time to restore a mongo cluster.
:paramtype point_in_time_utc: ~datetime.datetime
:keyword source_resource_id: Resource ID to locate the source cluster to restore.
:paramtype source_resource_id: str
"""
super().__init__(**kwargs)
self.point_in_time_utc = point_in_time_utc
self.source_resource_id = source_resource_id
|
class MongoClusterRestoreParameters(_serialization.Model):
'''Parameters used for restore operations.
:ivar point_in_time_utc: UTC point in time to restore a mongo cluster.
:vartype point_in_time_utc: ~datetime.datetime
:ivar source_resource_id: Resource ID to locate the source cluster to restore.
:vartype source_resource_id: str
'''
def __init__(
self,
*,
point_in_time_utc: Optional[datetime.datetime] = None,
source_resource_id: Optional[str] = None,
**kwargs: Any
) -> None:
'''
:keyword point_in_time_utc: UTC point in time to restore a mongo cluster.
:paramtype point_in_time_utc: ~datetime.datetime
:keyword source_resource_id: Resource ID to locate the source cluster to restore.
:paramtype source_resource_id: str
'''
pass
| 2 | 2 | 16 | 0 | 10 | 6 | 1 | 0.8 | 1 | 4 | 0 | 0 | 1 | 2 | 1 | 16 | 30 | 3 | 15 | 11 | 7 | 12 | 6 | 5 | 4 | 1 | 2 | 0 | 1 |
10,834 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.MongoClusterUpdate
|
class MongoClusterUpdate(_serialization.Model):
"""The type used for update operations of the MongoCluster.
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar properties: The updatable properties of the MongoCluster.
:vartype properties: ~azure.mgmt.mongocluster.models.MongoClusterUpdateProperties
"""
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
"properties": {"key": "properties", "type": "MongoClusterUpdateProperties"},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
properties: Optional["_models.MongoClusterUpdateProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword properties: The updatable properties of the MongoCluster.
:paramtype properties: ~azure.mgmt.mongocluster.models.MongoClusterUpdateProperties
"""
super().__init__(**kwargs)
self.tags = tags
self.properties = properties
|
class MongoClusterUpdate(_serialization.Model):
'''The type used for update operations of the MongoCluster.
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar properties: The updatable properties of the MongoCluster.
:vartype properties: ~azure.mgmt.mongocluster.models.MongoClusterUpdateProperties
'''
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
properties: Optional["_models.MongoClusterUpdateProperties"] = None,
**kwargs: Any
) -> None:
'''
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword properties: The updatable properties of the MongoCluster.
:paramtype properties: ~azure.mgmt.mongocluster.models.MongoClusterUpdateProperties
'''
pass
| 2 | 2 | 16 | 0 | 10 | 6 | 1 | 0.8 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 16 | 30 | 3 | 15 | 11 | 7 | 12 | 6 | 5 | 4 | 1 | 2 | 0 | 1 |
10,835 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.MongoClusterUpdateProperties
|
class MongoClusterUpdateProperties(_serialization.Model):
"""The updatable properties of the MongoCluster.
:ivar administrator_login: The administrator's login for the mongo cluster.
:vartype administrator_login: str
:ivar administrator_login_password: The password of the administrator login.
:vartype administrator_login_password: str
:ivar server_version: The Mongo DB server version. Defaults to the latest available version if
not specified.
:vartype server_version: str
:ivar public_network_access: Whether or not public endpoint access is allowed for this mongo
cluster. Known values are: "Enabled" and "Disabled".
:vartype public_network_access: str or ~azure.mgmt.mongocluster.models.PublicNetworkAccess
:ivar node_group_specs: The list of node group specs in the cluster.
:vartype node_group_specs: list[~azure.mgmt.mongocluster.models.NodeGroupSpec]
"""
_attribute_map = {
"administrator_login": {"key": "administratorLogin", "type": "str"},
"administrator_login_password": {"key": "administratorLoginPassword", "type": "str"},
"server_version": {"key": "serverVersion", "type": "str"},
"public_network_access": {"key": "publicNetworkAccess", "type": "str"},
"node_group_specs": {"key": "nodeGroupSpecs", "type": "[NodeGroupSpec]"},
}
def __init__(
self,
*,
administrator_login: Optional[str] = None,
administrator_login_password: Optional[str] = None,
server_version: Optional[str] = None,
public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
node_group_specs: Optional[List["_models.NodeGroupSpec"]] = None,
**kwargs: Any
) -> None:
"""
:keyword administrator_login: The administrator's login for the mongo cluster.
:paramtype administrator_login: str
:keyword administrator_login_password: The password of the administrator login.
:paramtype administrator_login_password: str
:keyword server_version: The Mongo DB server version. Defaults to the latest available version
if not specified.
:paramtype server_version: str
:keyword public_network_access: Whether or not public endpoint access is allowed for this mongo
cluster. Known values are: "Enabled" and "Disabled".
:paramtype public_network_access: str or ~azure.mgmt.mongocluster.models.PublicNetworkAccess
:keyword node_group_specs: The list of node group specs in the cluster.
:paramtype node_group_specs: list[~azure.mgmt.mongocluster.models.NodeGroupSpec]
"""
super().__init__(**kwargs)
self.administrator_login = administrator_login
self.administrator_login_password = administrator_login_password
self.server_version = server_version
self.public_network_access = public_network_access
self.node_group_specs = node_group_specs
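# --- Illustrative usage sketch (not part of the vendored SDK source) ---
# A PATCH body that rotates the administrator password and disables public
# network access; pair it with begin_update on the operations class.
from azure.mgmt.mongocluster import models

update = models.MongoClusterUpdate(
    properties=models.MongoClusterUpdateProperties(
        administrator_login_password="<new-password>",
        public_network_access="Disabled",
    )
)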
|
class MongoClusterUpdateProperties(_serialization.Model):
'''The updatable properties of the MongoCluster.
:ivar administrator_login: The administrator's login for the mongo cluster.
:vartype administrator_login: str
:ivar administrator_login_password: The password of the administrator login.
:vartype administrator_login_password: str
:ivar server_version: The Mongo DB server version. Defaults to the latest available version if
not specified.
:vartype server_version: str
:ivar public_network_access: Whether or not public endpoint access is allowed for this mongo
cluster. Known values are: "Enabled" and "Disabled".
:vartype public_network_access: str or ~azure.mgmt.mongocluster.models.PublicNetworkAccess
:ivar node_group_specs: The list of node group specs in the cluster.
:vartype node_group_specs: list[~azure.mgmt.mongocluster.models.NodeGroupSpec]
'''
def __init__(
self,
*,
administrator_login: Optional[str] = None,
administrator_login_password: Optional[str] = None,
server_version: Optional[str] = None,
public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
node_group_specs: Optional[List["_models.NodeGroupSpec"]] = None,
**kwargs: Any
) -> None:
'''
:keyword administrator_login: The administrator's login for the mongo cluster.
:paramtype administrator_login: str
:keyword administrator_login_password: The password of the administrator login.
:paramtype administrator_login_password: str
:keyword server_version: The Mongo DB server version. Defaults to the latest available version
if not specified.
:paramtype server_version: str
:keyword public_network_access: Whether or not public endpoint access is allowed for this mongo
cluster. Known values are: "Enabled" and "Disabled".
:paramtype public_network_access: str or ~azure.mgmt.mongocluster.models.PublicNetworkAccess
:keyword node_group_specs: The list of node group specs in the cluster.
:paramtype node_group_specs: list[~azure.mgmt.mongocluster.models.NodeGroupSpec]
'''
pass
| 2 | 2 | 30 | 0 | 16 | 14 | 1 | 1.17 | 1 | 3 | 0 | 0 | 1 | 5 | 1 | 16 | 55 | 3 | 24 | 17 | 13 | 28 | 9 | 8 | 7 | 1 | 2 | 0 | 1 |
10,836 |
Azure/azure-cli-extensions
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_mongocluster/models/_models_py3.py
|
azext_cosmosdb_preview.vendored_sdks.azure_mgmt_mongocluster.models._models_py3.FirewallRuleListResult
|
class FirewallRuleListResult(_serialization.Model):
"""The response of a FirewallRule list operation.
All required parameters must be populated in order to send to server.
:ivar value: The FirewallRule items on this page. Required.
:vartype value: list[~azure.mgmt.mongocluster.models.FirewallRule]
:ivar next_link: The link to the next page of items.
:vartype next_link: str
"""
_validation = {
"value": {"required": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[FirewallRule]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: List["_models.FirewallRule"], next_link: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword value: The FirewallRule items on this page. Required.
:paramtype value: list[~azure.mgmt.mongocluster.models.FirewallRule]
:keyword next_link: The link to the next page of items.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
|
class FirewallRuleListResult(_serialization.Model):
'''The response of a FirewallRule list operation.
All required parameters must be populated in order to send to server.
:ivar value: The FirewallRule items on this page. Required.
:vartype value: list[~azure.mgmt.mongocluster.models.FirewallRule]
:ivar next_link: The link to the next page of items.
:vartype next_link: str
'''
def __init__(self, *, value: List["_models.FirewallRule"], next_link: Optional[str] = None, **kwargs: Any) -> None:
'''
:keyword value: The FirewallRule items on this page. Required.
:paramtype value: list[~azure.mgmt.mongocluster.models.FirewallRule]
:keyword next_link: The link to the next page of items.
:paramtype next_link: str
'''
pass
| 2 | 2 | 10 | 0 | 4 | 6 | 1 | 1.08 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 16 | 30 | 5 | 12 | 6 | 10 | 13 | 7 | 6 | 5 | 1 | 2 | 0 | 1 |
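FirewallRuleListResult pairs a required value list with an optional nextLink, the usual Azure list-operation shape. A hedged sketch of how a client might walk such pages; fetch_page and PAGES are stand-ins for the real HTTP call:

# Hypothetical paging sketch over a value/nextLink list result.
from typing import Iterator, Optional

PAGES = {
    None: {"value": ["rule-a", "rule-b"], "nextLink": "page2"},
    "page2": {"value": ["rule-c"], "nextLink": None},
}

def fetch_page(link: Optional[str]) -> dict:
    return PAGES[link]          # stands in for an HTTP GET

def iter_rules() -> Iterator[str]:
    link: Optional[str] = None
    while True:
        page = fetch_page(link)
        yield from page["value"]      # the required "value" items
        link = page.get("nextLink")   # optional link to the next page
        if not link:
            break

print(list(iter_rules()))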
10,837 |
Azure/azure-cli-extensions
|
src/managednetworkfabric/azext_managednetworkfabric/aaz/latest/networkfabric/taprule/_resync.py
|
azext_managednetworkfabric.aaz.latest.networkfabric.taprule._resync.Resync.NetworkTapRulesResync
|
class NetworkTapRulesResync(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(
request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkTapRules/{networkTapRuleName}/resync",
**self.url_parameters
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"networkTapRuleName", self.ctx.args.resource_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2024-02-15-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_ResyncHelper._build_schema_common_post_action_response_for_state_update_read(
cls._schema_on_200)
return cls._schema_on_200
|
class NetworkTapRulesResync(AAZHttpOperation):
def __call__(self, *args, **kwargs):
pass
@property
def url(self):
pass
@property
def method(self):
pass
@property
def error_format(self):
pass
@property
def url_parameters(self):
pass
@property
def query_parameters(self):
pass
@property
def header_parameters(self):
pass
def on_200(self, session):
pass
@classmethod
def _build_schema_on_200(cls):
pass
| 17 | 0 | 9 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 8 | 1 | 9 | 9 | 98 | 13 | 85 | 26 | 68 | 0 | 35 | 18 | 25 | 3 | 1 | 1 | 12 |
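NetworkTapRulesResync hands 200/202 responses to build_lro_polling and waits for a terminal state via the location header. A standalone sketch of that long-running-operation loop; the simulated response sequence below is an assumption, not real service output:

# Sketch of the LRO polling pattern behind build_lro_polling.
import itertools, time

RESPONSES = itertools.chain(
    [{"status": "Accepted"}, {"status": "Updating"}],
    itertools.repeat({"status": "Succeeded"}),
)

def poll_once() -> dict:
    return next(RESPONSES)      # stands in for polling the location URL

def wait_for_completion(interval: float = 0.0) -> dict:
    while True:
        body = poll_once()
        if body["status"] in ("Succeeded", "Failed", "Canceled"):
            return body          # terminal provisioning state reached
        time.sleep(interval)

print(wait_for_completion())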
10,838 |
Azure/azure-cli-extensions
|
src/managednetworkfabric/azext_managednetworkfabric/aaz/latest/networkfabric/taprule/_delete.py
|
azext_managednetworkfabric.aaz.latest.networkfabric.taprule._delete.Delete.NetworkTapRulesDelete
|
class NetworkTapRulesDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(
request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkTapRules/{networkTapRuleName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"networkTapRuleName", self.ctx.args.resource_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2024-02-15-preview",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
|
class NetworkTapRulesDelete(AAZHttpOperation):
def __call__(self, *args, **kwargs):
pass
@property
def url(self):
pass
@property
def method(self):
pass
@property
def error_format(self):
pass
@property
def url_parameters(self):
pass
@property
def query_parameters(self):
pass
def on_200(self, session):
pass
def on_204(self, session):
pass
| 14 | 0 | 9 | 0 | 9 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 8 | 1 | 8 | 8 | 84 | 9 | 75 | 20 | 61 | 0 | 28 | 14 | 19 | 4 | 1 | 1 | 11 |
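NetworkTapRulesDelete adds a third branch for 204, where the resource is already absent. A small sketch of the status-code dispatch, under the assumption that unexpected codes surface as errors:

# Sketch of the status-code dispatch used by the delete operation:
# 200/202 start LRO polling, 204 means no content, anything else errors.
def handle_delete(status_code: int) -> str:
    if status_code in (200, 202):
        return "poll until deletion completes"
    if status_code == 204:
        return "nothing to delete (no content)"
    raise RuntimeError(f"unexpected status {status_code}")

for code in (202, 204):
    print(code, "->", handle_delete(code))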
10,839 |
Azure/azure-cli-extensions
|
src/managednetworkfabric/azext_managednetworkfabric/aaz/latest/networkfabric/taprule/_list.py
|
azext_managednetworkfabric.aaz.latest.networkfabric.taprule._list.List.NetworkTapRulesListByResourceGroup
|
class NetworkTapRulesListByResourceGroup(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(
request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkTapRules",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2024-02-15-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.location = AAZStrType(
flags={"required": True},
)
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"required": True, "client_flatten": True},
)
_element.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_element.tags = AAZDictType()
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.value.Element.properties
properties.administrative_state = AAZStrType(
serialized_name="administrativeState",
flags={"read_only": True},
)
properties.annotation = AAZStrType()
properties.configuration_state = AAZStrType(
serialized_name="configurationState",
flags={"read_only": True},
)
properties.configuration_type = AAZStrType(
serialized_name="configurationType",
flags={"required": True},
)
properties.dynamic_match_configurations = AAZListType(
serialized_name="dynamicMatchConfigurations",
)
properties.last_synced_time = AAZStrType(
serialized_name="lastSyncedTime",
flags={"read_only": True},
)
properties.match_configurations = AAZListType(
serialized_name="matchConfigurations",
)
properties.network_tap_id = AAZStrType(
serialized_name="networkTapId",
flags={"read_only": True},
)
properties.polling_interval_in_seconds = AAZIntType(
serialized_name="pollingIntervalInSeconds",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.tap_rules_url = AAZStrType(
serialized_name="tapRulesUrl",
)
dynamic_match_configurations = cls._schema_on_200.value.Element.properties.dynamic_match_configurations
dynamic_match_configurations.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.properties.dynamic_match_configurations.Element
_element.ip_groups = AAZListType(
serialized_name="ipGroups",
)
_element.port_groups = AAZListType(
serialized_name="portGroups",
)
_element.vlan_groups = AAZListType(
serialized_name="vlanGroups",
)
ip_groups = cls._schema_on_200.value.Element.properties.dynamic_match_configurations.Element.ip_groups
ip_groups.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.properties.dynamic_match_configurations.Element.ip_groups.Element
_element.ip_address_type = AAZStrType(
serialized_name="ipAddressType",
)
_element.ip_prefixes = AAZListType(
serialized_name="ipPrefixes",
)
_element.name = AAZStrType()
ip_prefixes = cls._schema_on_200.value.Element.properties.dynamic_match_configurations.Element.ip_groups.Element.ip_prefixes
ip_prefixes.Element = AAZStrType()
port_groups = cls._schema_on_200.value.Element.properties.dynamic_match_configurations.Element.port_groups
port_groups.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.properties.dynamic_match_configurations.Element.port_groups.Element
_element.name = AAZStrType()
_element.ports = AAZListType()
ports = cls._schema_on_200.value.Element.properties.dynamic_match_configurations.Element.port_groups.Element.ports
ports.Element = AAZStrType()
vlan_groups = cls._schema_on_200.value.Element.properties.dynamic_match_configurations.Element.vlan_groups
vlan_groups.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.properties.dynamic_match_configurations.Element.vlan_groups.Element
_element.name = AAZStrType()
_element.vlans = AAZListType()
vlans = cls._schema_on_200.value.Element.properties.dynamic_match_configurations.Element.vlan_groups.Element.vlans
vlans.Element = AAZStrType()
match_configurations = cls._schema_on_200.value.Element.properties.match_configurations
match_configurations.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.properties.match_configurations.Element
_element.actions = AAZListType()
_element.ip_address_type = AAZStrType(
serialized_name="ipAddressType",
)
_element.match_conditions = AAZListType(
serialized_name="matchConditions",
)
_element.match_configuration_name = AAZStrType(
serialized_name="matchConfigurationName",
)
_element.sequence_number = AAZIntType(
serialized_name="sequenceNumber",
)
actions = cls._schema_on_200.value.Element.properties.match_configurations.Element.actions
actions.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.properties.match_configurations.Element.actions.Element
_element.destination_id = AAZStrType(
serialized_name="destinationId",
)
_element.is_timestamp_enabled = AAZStrType(
serialized_name="isTimestampEnabled",
)
_element.match_configuration_name = AAZStrType(
serialized_name="matchConfigurationName",
)
_element.truncate = AAZStrType()
_element.type = AAZStrType()
match_conditions = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions
match_conditions.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element
_element.encapsulation_type = AAZStrType(
serialized_name="encapsulationType",
)
_element.ip_condition = AAZObjectType(
serialized_name="ipCondition",
)
_element.port_condition = AAZObjectType(
serialized_name="portCondition",
)
_element.protocol_types = AAZListType(
serialized_name="protocolTypes",
)
_element.vlan_match_condition = AAZObjectType(
serialized_name="vlanMatchCondition",
)
ip_condition = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.ip_condition
ip_condition.ip_group_names = AAZListType(
serialized_name="ipGroupNames",
)
ip_condition.ip_prefix_values = AAZListType(
serialized_name="ipPrefixValues",
)
ip_condition.prefix_type = AAZStrType(
serialized_name="prefixType",
)
ip_condition.type = AAZStrType()
ip_group_names = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.ip_condition.ip_group_names
ip_group_names.Element = AAZStrType()
ip_prefix_values = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.ip_condition.ip_prefix_values
ip_prefix_values.Element = AAZStrType()
port_condition = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.port_condition
port_condition.layer4_protocol = AAZStrType(
serialized_name="layer4Protocol",
flags={"required": True},
)
port_condition.port_group_names = AAZListType(
serialized_name="portGroupNames",
)
port_condition.port_type = AAZStrType(
serialized_name="portType",
)
port_condition.ports = AAZListType()
port_group_names = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.port_condition.port_group_names
port_group_names.Element = AAZStrType()
ports = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.port_condition.ports
ports.Element = AAZStrType()
protocol_types = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.protocol_types
protocol_types.Element = AAZStrType()
vlan_match_condition = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.vlan_match_condition
vlan_match_condition.inner_vlans = AAZListType(
serialized_name="innerVlans",
)
vlan_match_condition.vlan_group_names = AAZListType(
serialized_name="vlanGroupNames",
)
vlan_match_condition.vlans = AAZListType()
inner_vlans = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.vlan_match_condition.inner_vlans
inner_vlans.Element = AAZStrType()
vlan_group_names = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.vlan_match_condition.vlan_group_names
vlan_group_names.Element = AAZStrType()
vlans = cls._schema_on_200.value.Element.properties.match_configurations.Element.match_conditions.Element.vlan_match_condition.vlans
vlans.Element = AAZStrType()
system_data = cls._schema_on_200.value.Element.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
tags = cls._schema_on_200.value.Element.tags
tags.Element = AAZStrType()
return cls._schema_on_200
|
class NetworkTapRulesListByResourceGroup(AAZHttpOperation):
def __call__(self, *args, **kwargs):
pass
@property
def url(self):
pass
@property
def method(self):
pass
@property
def error_format(self):
pass
@property
def url_parameters(self):
pass
@property
def query_parameters(self):
pass
@property
def header_parameters(self):
pass
def on_200(self, session):
pass
@classmethod
def _build_schema_on_200(cls):
pass
| 17 | 0 | 35 | 4 | 30 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 8 | 0 | 9 | 9 | 331 | 47 | 284 | 50 | 267 | 0 | 148 | 43 | 138 | 2 | 1 | 1 | 11 |
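_build_schema_on_200 above builds its large response schema once and caches it on the class, so later calls reuse the same object. A minimal sketch of that memoization pattern (the toy schema dict is illustrative):

# Sketch of the class-level schema cache used by _build_schema_on_200.
class ListOperation:
    _schema_on_200 = None

    @classmethod
    def _build_schema_on_200(cls) -> dict:
        if cls._schema_on_200 is not None:
            return cls._schema_on_200           # cached after the first build
        cls._schema_on_200 = {"nextLink": str, "value": list}
        return cls._schema_on_200

assert ListOperation._build_schema_on_200() is ListOperation._build_schema_on_200()
print("schema built once and cached")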
10,840 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FailActivity
|
class FailActivity(ControlActivity):
"""This activity will fail within its own scope and output a custom error message and error code.
The error message and code can provided either as a string literal or as an expression that can
be evaluated to a string at runtime. The activity scope can be the whole pipeline or a control
activity (e.g. foreach, switch, until), if the fail activity is contained in it.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar message: The error message that surfaced in the Fail activity. It can be dynamic content
that's evaluated to a non empty/blank string at runtime. Type: string (or Expression with
resultType string). Required.
:vartype message: JSON
:ivar error_code: The error code that categorizes the error type of the Fail activity. It can
be dynamic content that's evaluated to a non empty/blank string at runtime. Type: string (or
Expression with resultType string). Required.
:vartype error_code: JSON
"""
_validation = {
"name": {"required": True},
"type": {"required": True},
"message": {"required": True},
"error_code": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"state": {"key": "state", "type": "str"},
"on_inactive_mark_as": {"key": "onInactiveMarkAs", "type": "str"},
"depends_on": {"key": "dependsOn", "type": "[ActivityDependency]"},
"user_properties": {"key": "userProperties", "type": "[UserProperty]"},
"message": {"key": "typeProperties.message", "type": "object"},
"error_code": {"key": "typeProperties.errorCode", "type": "object"},
}
def __init__(
self,
*,
name: str,
message: JSON,
error_code: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword message: The error message that surfaced in the Fail activity. It can be dynamic
content that's evaluated to a non empty/blank string at runtime. Type: string (or Expression
with resultType string). Required.
:paramtype message: JSON
:keyword error_code: The error code that categorizes the error type of the Fail activity. It
can be dynamic content that's evaluated to a non empty/blank string at runtime. Type: string
(or Expression with resultType string). Required.
:paramtype error_code: JSON
"""
super().__init__(
additional_properties=additional_properties,
name=name,
description=description,
state=state,
on_inactive_mark_as=on_inactive_mark_as,
depends_on=depends_on,
user_properties=user_properties,
**kwargs
)
self.type: str = "Fail"
self.message = message
self.error_code = error_code
|
class FailActivity(ControlActivity):
'''This activity will fail within its own scope and output a custom error message and error code.
The error message and code can be provided either as a string literal or as an expression that can
be evaluated to a string at runtime. The activity scope can be the whole pipeline or a control
activity (e.g. foreach, switch, until), if the fail activity is contained in it.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar message: The error message that surfaced in the Fail activity. It can be dynamic content
that's evaluated to a non empty/blank string at runtime. Type: string (or Expression with
resultType string). Required.
:vartype message: JSON
:ivar error_code: The error code that categorizes the error type of the Fail activity. It can
be dynamic content that's evaluated to a non empty/blank string at runtime. Type: string (or
Expression with resultType string). Required.
:vartype error_code: JSON
'''
def __init__(
self,
*,
name: str,
message: JSON,
error_code: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword message: The error message that surfaced in the Fail activity. It can be dynamic
content that's evaluated to a non empty/blank string at runtime. Type: string (or Expression
with resultType string). Required.
:paramtype message: JSON
:keyword error_code: The error code that categorizes the error type of the Fail activity. It
can be dynamic content that's evaluated to a non empty/blank string at runtime. Type: string
(or Expression with resultType string). Required.
:paramtype error_code: JSON
'''
pass
| 2 | 2 | 55 | 0 | 27 | 28 | 1 | 1.35 | 1 | 3 | 0 | 0 | 1 | 3 | 1 | 18 | 113 | 5 | 46 | 20 | 31 | 62 | 8 | 7 | 6 | 1 | 4 | 0 | 1 |
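FailActivity pins self.type = "Fail" after delegating shared fields to ControlActivity, the discriminator pattern these models rely on for polymorphic (de)serialization. A standalone sketch with simplified stand-in classes:

# Sketch of the discriminator pattern: each subclass pins self.type so a
# serializer can round-trip the concrete activity kind.
from typing import Any

class Activity:
    def __init__(self, *, name: str, **kwargs: Any) -> None:
        self.name = name
        self.type: str = "Activity"

class Fail(Activity):
    def __init__(self, *, name: str, message: str, error_code: str, **kwargs: Any) -> None:
        super().__init__(name=name, **kwargs)
        self.type = "Fail"                    # discriminator value
        self.message = message
        self.error_code = error_code

act = Fail(name="stop", message="bad input", error_code="400")
print(act.type, act.message)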
10,841 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FileServerLinkedService
|
class FileServerLinkedService(LinkedService):
"""File system linked service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar host: Host name of the server. Type: string (or Expression with resultType string).
Required.
:vartype host: JSON
:ivar user_id: User ID to logon the server. Type: string (or Expression with resultType
string).
:vartype user_id: JSON
:ivar password: Password to logon the server.
:vartype password: ~azure.mgmt.datafactory.models.SecretBase
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
"""
_validation = {
"type": {"required": True},
"host": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"version": {"key": "version", "type": "str"},
"connect_via": {"key": "connectVia", "type": "IntegrationRuntimeReference"},
"description": {"key": "description", "type": "str"},
"parameters": {"key": "parameters", "type": "{ParameterSpecification}"},
"annotations": {"key": "annotations", "type": "[object]"},
"host": {"key": "typeProperties.host", "type": "object"},
"user_id": {"key": "typeProperties.userId", "type": "object"},
"password": {"key": "typeProperties.password", "type": "SecretBase"},
"encrypted_credential": {"key": "typeProperties.encryptedCredential", "type": "str"},
}
def __init__(
self,
*,
host: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
user_id: Optional[JSON] = None,
password: Optional["_models.SecretBase"] = None,
encrypted_credential: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword host: Host name of the server. Type: string (or Expression with resultType string).
Required.
:paramtype host: JSON
:keyword user_id: User ID to logon the server. Type: string (or Expression with resultType
string).
:paramtype user_id: JSON
:keyword password: Password to logon the server.
:paramtype password: ~azure.mgmt.datafactory.models.SecretBase
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
"""
super().__init__(
additional_properties=additional_properties,
version=version,
connect_via=connect_via,
description=description,
parameters=parameters,
annotations=annotations,
**kwargs
)
self.type: str = "FileServer"
self.host = host
self.user_id = user_id
self.password = password
self.encrypted_credential = encrypted_credential
|
class FileServerLinkedService(LinkedService):
'''File system linked service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar host: Host name of the server. Type: string (or Expression with resultType string).
Required.
:vartype host: JSON
:ivar user_id: User ID to logon the server. Type: string (or Expression with resultType
string).
:vartype user_id: JSON
:ivar password: Password to logon the server.
:vartype password: ~azure.mgmt.datafactory.models.SecretBase
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
'''
def __init__(
self,
*,
host: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
user_id: Optional[JSON] = None,
password: Optional["_models.SecretBase"] = None,
encrypted_credential: Optional[str] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword host: Host name of the server. Type: string (or Expression with resultType string).
Required.
:paramtype host: JSON
:keyword user_id: User ID to logon the server. Type: string (or Expression with resultType
string).
:paramtype user_id: JSON
:keyword password: Password to logon the server.
:paramtype password: ~azure.mgmt.datafactory.models.SecretBase
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
'''
pass
| 2 | 2 | 55 | 0 | 29 | 26 | 1 | 1.17 | 1 | 3 | 0 | 0 | 1 | 5 | 1 | 17 | 107 | 5 | 47 | 23 | 31 | 55 | 10 | 9 | 8 | 1 | 3 | 0 | 1 |
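The _attribute_map above uses dotted keys such as "typeProperties.host" to flatten nested JSON onto flat Python attributes. A toy serializer sketch showing how such keys nest on the wire; this illustrates the idea only and is not msrest's actual implementation:

# Toy sketch: dotted attribute-map keys create nested JSON objects.
def serialize(obj: dict, attribute_map: dict) -> dict:
    out: dict = {}
    for attr, key in attribute_map.items():
        target = out
        parts = key.split(".")
        for part in parts[:-1]:
            target = target.setdefault(part, {})   # create nested objects
        target[parts[-1]] = obj[attr]
    return out

mapping = {"type": "type", "host": "typeProperties.host"}
print(serialize({"type": "FileServer", "host": "\\\\myserver"}, mapping))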
10,842 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FileServerLocation
|
class FileServerLocation(DatasetLocation):
"""The location of file server dataset.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of dataset storage location. Required.
:vartype type: str
:ivar folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:vartype folder_path: JSON
:ivar file_name: Specify the file name of dataset. Type: string (or Expression with resultType
string).
:vartype file_name: JSON
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"folder_path": {"key": "folderPath", "type": "object"},
"file_name": {"key": "fileName", "type": "object"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
folder_path: Optional[JSON] = None,
file_name: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:paramtype folder_path: JSON
:keyword file_name: Specify the file name of dataset. Type: string (or Expression with
resultType string).
:paramtype file_name: JSON
"""
super().__init__(
additional_properties=additional_properties, folder_path=folder_path, file_name=file_name, **kwargs
)
self.type: str = "FileServerLocation"
|
class FileServerLocation(DatasetLocation):
'''The location of file server dataset.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of dataset storage location. Required.
:vartype type: str
:ivar folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:vartype folder_path: JSON
:ivar file_name: Specify the file name of dataset. Type: string (or Expression with resultType
string).
:vartype file_name: JSON
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
folder_path: Optional[JSON] = None,
file_name: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:paramtype folder_path: JSON
:keyword file_name: Specify the file name of dataset. Type: string (or Expression with
resultType string).
:paramtype file_name: JSON
'''
pass
| 2 | 2 | 23 | 0 | 12 | 11 | 1 | 1.14 | 1 | 3 | 0 | 0 | 1 | 1 | 1 | 17 | 52 | 5 | 22 | 12 | 13 | 25 | 6 | 5 | 4 | 1 | 3 | 0 | 1 |
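The _validation dict above drives a pre-send check that required fields are populated. A hedged, simplified sketch of that check:

# Simplified sketch of the required-field validation msrest-style models do.
_validation = {"type": {"required": True}}

def validate(payload: dict, rules: dict) -> None:
    for field, rule in rules.items():
        if rule.get("required") and payload.get(field) is None:
            raise ValueError(f"required field missing: {field}")

validate({"type": "FileServerLocation"}, _validation)   # passes
try:
    validate({}, _validation)
except ValueError as exc:
    print(exc)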
10,843 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FileServerReadSettings
|
class FileServerReadSettings(StoreReadSettings):
"""File server read settings.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The read setting type. Required.
:vartype type: str
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:vartype recursive: JSON
:ivar wildcard_folder_path: FileServer wildcardFolderPath. Type: string (or Expression with
resultType string).
:vartype wildcard_folder_path: JSON
:ivar wildcard_file_name: FileServer wildcardFileName. Type: string (or Expression with
resultType string).
:vartype wildcard_file_name: JSON
:ivar file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:vartype file_list_path: JSON
:ivar enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:vartype enable_partition_discovery: JSON
:ivar partition_root_path: Specify the root path where partition discovery starts from. Type:
string (or Expression with resultType string).
:vartype partition_root_path: JSON
:ivar delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:vartype delete_files_after_completion: JSON
:ivar modified_datetime_start: The start of file's modified datetime. Type: string (or
Expression with resultType string).
:vartype modified_datetime_start: JSON
:ivar modified_datetime_end: The end of file's modified datetime. Type: string (or Expression
with resultType string).
:vartype modified_datetime_end: JSON
:ivar file_filter: Specify a filter to be used to select a subset of files in the folderPath
rather than all files. Type: string (or Expression with resultType string).
:vartype file_filter: JSON
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"max_concurrent_connections": {"key": "maxConcurrentConnections", "type": "object"},
"disable_metrics_collection": {"key": "disableMetricsCollection", "type": "object"},
"recursive": {"key": "recursive", "type": "object"},
"wildcard_folder_path": {"key": "wildcardFolderPath", "type": "object"},
"wildcard_file_name": {"key": "wildcardFileName", "type": "object"},
"file_list_path": {"key": "fileListPath", "type": "object"},
"enable_partition_discovery": {"key": "enablePartitionDiscovery", "type": "object"},
"partition_root_path": {"key": "partitionRootPath", "type": "object"},
"delete_files_after_completion": {"key": "deleteFilesAfterCompletion", "type": "object"},
"modified_datetime_start": {"key": "modifiedDatetimeStart", "type": "object"},
"modified_datetime_end": {"key": "modifiedDatetimeEnd", "type": "object"},
"file_filter": {"key": "fileFilter", "type": "object"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
recursive: Optional[JSON] = None,
wildcard_folder_path: Optional[JSON] = None,
wildcard_file_name: Optional[JSON] = None,
file_list_path: Optional[JSON] = None,
enable_partition_discovery: Optional[JSON] = None,
partition_root_path: Optional[JSON] = None,
delete_files_after_completion: Optional[JSON] = None,
modified_datetime_start: Optional[JSON] = None,
modified_datetime_end: Optional[JSON] = None,
file_filter: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:paramtype recursive: JSON
:keyword wildcard_folder_path: FileServer wildcardFolderPath. Type: string (or Expression with
resultType string).
:paramtype wildcard_folder_path: JSON
:keyword wildcard_file_name: FileServer wildcardFileName. Type: string (or Expression with
resultType string).
:paramtype wildcard_file_name: JSON
:keyword file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:paramtype file_list_path: JSON
:keyword enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:paramtype enable_partition_discovery: JSON
:keyword partition_root_path: Specify the root path where partition discovery starts from.
Type: string (or Expression with resultType string).
:paramtype partition_root_path: JSON
:keyword delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:paramtype delete_files_after_completion: JSON
:keyword modified_datetime_start: The start of file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_start: JSON
:keyword modified_datetime_end: The end of file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_end: JSON
:keyword file_filter: Specify a filter to be used to select a subset of files in the folderPath
rather than all files. Type: string (or Expression with resultType string).
:paramtype file_filter: JSON
"""
super().__init__(
additional_properties=additional_properties,
max_concurrent_connections=max_concurrent_connections,
disable_metrics_collection=disable_metrics_collection,
**kwargs
)
self.type: str = "FileServerReadSettings"
self.recursive = recursive
self.wildcard_folder_path = wildcard_folder_path
self.wildcard_file_name = wildcard_file_name
self.file_list_path = file_list_path
self.enable_partition_discovery = enable_partition_discovery
self.partition_root_path = partition_root_path
self.delete_files_after_completion = delete_files_after_completion
self.modified_datetime_start = modified_datetime_start
self.modified_datetime_end = modified_datetime_end
self.file_filter = file_filter
|
class FileServerReadSettings(StoreReadSettings):
'''File server read settings.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The read setting type. Required.
:vartype type: str
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:vartype recursive: JSON
:ivar wildcard_folder_path: FileServer wildcardFolderPath. Type: string (or Expression with
resultType string).
:vartype wildcard_folder_path: JSON
:ivar wildcard_file_name: FileServer wildcardFileName. Type: string (or Expression with
resultType string).
:vartype wildcard_file_name: JSON
:ivar file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:vartype file_list_path: JSON
:ivar enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:vartype enable_partition_discovery: JSON
:ivar partition_root_path: Specify the root path where partition discovery starts from. Type:
string (or Expression with resultType string).
:vartype partition_root_path: JSON
:ivar delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:vartype delete_files_after_completion: JSON
:ivar modified_datetime_start: The start of file's modified datetime. Type: string (or
Expression with resultType string).
:vartype modified_datetime_start: JSON
:ivar modified_datetime_end: The end of file's modified datetime. Type: string (or Expression
with resultType string).
:vartype modified_datetime_end: JSON
:ivar file_filter: Specify a filter to be used to select a subset of files in the folderPath
rather than all files. Type: string (or Expression with resultType string).
:vartype file_filter: JSON
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
recursive: Optional[JSON] = None,
wildcard_folder_path: Optional[JSON] = None,
wildcard_file_name: Optional[JSON] = None,
file_list_path: Optional[JSON] = None,
enable_partition_discovery: Optional[JSON] = None,
partition_root_path: Optional[JSON] = None,
delete_files_after_completion: Optional[JSON] = None,
modified_datetime_start: Optional[JSON] = None,
modified_datetime_end: Optional[JSON] = None,
file_filter: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:paramtype recursive: JSON
:keyword wildcard_folder_path: FileServer wildcardFolderPath. Type: string (or Expression with
resultType string).
:paramtype wildcard_folder_path: JSON
:keyword wildcard_file_name: FileServer wildcardFileName. Type: string (or Expression with
resultType string).
:paramtype wildcard_file_name: JSON
:keyword file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:paramtype file_list_path: JSON
:keyword enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:paramtype enable_partition_discovery: JSON
:keyword partition_root_path: Specify the root path where partition discovery starts from.
Type: string (or Expression with resultType string).
:paramtype partition_root_path: JSON
:keyword delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:paramtype delete_files_after_completion: JSON
:keyword modified_datetime_start: The start of file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_start: JSON
:keyword modified_datetime_end: The end of file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_end: JSON
:keyword file_filter: Specify a filter to be used to select a subset of files in the folderPath
rather than all files. Type: string (or Expression with resultType string).
:paramtype file_filter: JSON
'''
pass
| 2 | 2 | 77 | 0 | 35 | 42 | 1 | 1.58 | 1 | 3 | 0 | 0 | 1 | 11 | 1 | 17 | 147 | 5 | 55 | 32 | 36 | 87 | 16 | 15 | 14 | 1 | 3 | 0 | 1 |
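Most JSON-typed settings above accept either a literal or a Data Factory expression object evaluated at runtime. A sketch of both payload shapes for wildcardFileName; the expression string is a made-up example:

# Two shapes for the same setting: a literal and a runtime expression.
literal_settings = {"recursive": True, "wildcardFileName": "*.csv"}
expression_settings = {
    "recursive": True,
    "wildcardFileName": {                       # expression evaluated at runtime
        "type": "Expression",
        "value": "@concat(pipeline().parameters.prefix, '*.csv')",
    },
}
for s in (literal_settings, expression_settings):
    print(type(s["wildcardFileName"]).__name__)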
10,844 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FileServerWriteSettings
|
class FileServerWriteSettings(StoreWriteSettings):
"""File server write settings.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The write setting type. Required.
:vartype type: str
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar copy_behavior: The type of copy behavior for copy sink.
:vartype copy_behavior: JSON
:ivar metadata: Specify the custom metadata to be added to sink data. Type: array of objects
(or Expression with resultType array of objects).
:vartype metadata: list[~azure.mgmt.datafactory.models.MetadataItem]
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"max_concurrent_connections": {"key": "maxConcurrentConnections", "type": "object"},
"disable_metrics_collection": {"key": "disableMetricsCollection", "type": "object"},
"copy_behavior": {"key": "copyBehavior", "type": "object"},
"metadata": {"key": "metadata", "type": "[MetadataItem]"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
copy_behavior: Optional[JSON] = None,
metadata: Optional[List["_models.MetadataItem"]] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword copy_behavior: The type of copy behavior for copy sink.
:paramtype copy_behavior: JSON
:keyword metadata: Specify the custom metadata to be added to sink data. Type: array of objects
(or Expression with resultType array of objects).
:paramtype metadata: list[~azure.mgmt.datafactory.models.MetadataItem]
"""
super().__init__(
additional_properties=additional_properties,
max_concurrent_connections=max_concurrent_connections,
disable_metrics_collection=disable_metrics_collection,
copy_behavior=copy_behavior,
metadata=metadata,
**kwargs
)
self.type: str = "FileServerWriteSettings"
|
class FileServerWriteSettings(StoreWriteSettings):
'''File server write settings.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The write setting type. Required.
:vartype type: str
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar copy_behavior: The type of copy behavior for copy sink.
:vartype copy_behavior: JSON
:ivar metadata: Specify the custom metadata to be added to sink data. Type: array of objects
(or Expression with resultType array of objects).
:vartype metadata: list[~azure.mgmt.datafactory.models.MetadataItem]
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
copy_behavior: Optional[JSON] = None,
metadata: Optional[List["_models.MetadataItem"]] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword copy_behavior: The type of copy behavior for copy sink.
:paramtype copy_behavior: JSON
:keyword metadata: Specify the custom metadata to be added to sink data. Type: array of objects
(or Expression with resultType array of objects).
:paramtype metadata: list[~azure.mgmt.datafactory.models.MetadataItem]
'''
pass
| 2 | 2 | 35 | 0 | 19 | 16 | 1 | 1.13 | 1 | 3 | 0 | 0 | 1 | 1 | 1 | 17 | 71 | 5 | 31 | 14 | 20 | 35 | 6 | 5 | 4 | 1 | 3 | 0 | 1 |
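A minimal usage sketch for the FileServerWriteSettings record above, assuming the public azure.mgmt.datafactory.models namespace cited in its docstrings is importable; the field values are illustrative placeholders, not part of the record.

from azure.mgmt.datafactory.models import FileServerWriteSettings

# JSON-typed fields accept plain Python values in place of ADF expression objects.
write_settings = FileServerWriteSettings(
    max_concurrent_connections=4,
    disable_metrics_collection=False,
    copy_behavior="PreserveHierarchy",  # hypothetical copy-behavior value
)
assert write_settings.type == "FileServerWriteSettings"  # discriminator set by __init__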
10,845 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FileShareDataset
|
class FileShareDataset(Dataset):
"""An on-premises file system dataset.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of dataset. Required.
:vartype type: str
:ivar description: Dataset description.
:vartype description: str
:ivar structure: Columns that define the structure of the dataset. Type: array (or Expression
with resultType array), itemType: DatasetDataElement.
:vartype structure: JSON
:ivar schema: Columns that define the physical type schema of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetSchemaDataElement.
:vartype schema: JSON
:ivar linked_service_name: Linked service reference. Required.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar parameters: Parameters for dataset.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the Dataset.
:vartype annotations: list[JSON]
:ivar folder: The folder that this Dataset is in. If not specified, Dataset will appear at the
root level.
:vartype folder: ~azure.mgmt.datafactory.models.DatasetFolder
:ivar folder_path: The path of the on-premises file system. Type: string (or Expression with
resultType string).
:vartype folder_path: JSON
:ivar file_name: The name of the on-premises file system. Type: string (or Expression with
resultType string).
:vartype file_name: JSON
:ivar modified_datetime_start: The start of file's modified datetime. Type: string (or
Expression with resultType string).
:vartype modified_datetime_start: JSON
:ivar modified_datetime_end: The end of file's modified datetime. Type: string (or Expression
with resultType string).
:vartype modified_datetime_end: JSON
:ivar format: The format of the files.
:vartype format: ~azure.mgmt.datafactory.models.DatasetStorageFormat
:ivar file_filter: Specify a filter to be used to select a subset of files in the folderPath
rather than all files. Type: string (or Expression with resultType string).
:vartype file_filter: JSON
:ivar compression: The data compression method used for the file system.
:vartype compression: ~azure.mgmt.datafactory.models.DatasetCompression
"""
_validation = {
"type": {"required": True},
"linked_service_name": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"structure": {"key": "structure", "type": "object"},
"schema": {"key": "schema", "type": "object"},
"linked_service_name": {"key": "linkedServiceName", "type": "LinkedServiceReference"},
"parameters": {"key": "parameters", "type": "{ParameterSpecification}"},
"annotations": {"key": "annotations", "type": "[object]"},
"folder": {"key": "folder", "type": "DatasetFolder"},
"folder_path": {"key": "typeProperties.folderPath", "type": "object"},
"file_name": {"key": "typeProperties.fileName", "type": "object"},
"modified_datetime_start": {"key": "typeProperties.modifiedDatetimeStart", "type": "object"},
"modified_datetime_end": {"key": "typeProperties.modifiedDatetimeEnd", "type": "object"},
"format": {"key": "typeProperties.format", "type": "DatasetStorageFormat"},
"file_filter": {"key": "typeProperties.fileFilter", "type": "object"},
"compression": {"key": "typeProperties.compression", "type": "DatasetCompression"},
}
def __init__(
self,
*,
linked_service_name: "_models.LinkedServiceReference",
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
structure: Optional[JSON] = None,
schema: Optional[JSON] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
folder: Optional["_models.DatasetFolder"] = None,
folder_path: Optional[JSON] = None,
file_name: Optional[JSON] = None,
modified_datetime_start: Optional[JSON] = None,
modified_datetime_end: Optional[JSON] = None,
format: Optional["_models.DatasetStorageFormat"] = None,
file_filter: Optional[JSON] = None,
compression: Optional["_models.DatasetCompression"] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword description: Dataset description.
:paramtype description: str
:keyword structure: Columns that define the structure of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetDataElement.
:paramtype structure: JSON
:keyword schema: Columns that define the physical type schema of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetSchemaDataElement.
:paramtype schema: JSON
:keyword linked_service_name: Linked service reference. Required.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword parameters: Parameters for dataset.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the Dataset.
:paramtype annotations: list[JSON]
:keyword folder: The folder that this Dataset is in. If not specified, Dataset will appear at
the root level.
:paramtype folder: ~azure.mgmt.datafactory.models.DatasetFolder
:keyword folder_path: The path of the on-premises file system. Type: string (or Expression with
resultType string).
:paramtype folder_path: JSON
:keyword file_name: The name of the on-premises file system. Type: string (or Expression with
resultType string).
:paramtype file_name: JSON
:keyword modified_datetime_start: The start of file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_start: JSON
:keyword modified_datetime_end: The end of file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_end: JSON
:keyword format: The format of the files.
:paramtype format: ~azure.mgmt.datafactory.models.DatasetStorageFormat
:keyword file_filter: Specify a filter to be used to select a subset of files in the folderPath
rather than all files. Type: string (or Expression with resultType string).
:paramtype file_filter: JSON
:keyword compression: The data compression method used for the file system.
:paramtype compression: ~azure.mgmt.datafactory.models.DatasetCompression
"""
super().__init__(
additional_properties=additional_properties,
description=description,
structure=structure,
schema=schema,
linked_service_name=linked_service_name,
parameters=parameters,
annotations=annotations,
folder=folder,
**kwargs
)
self.type: str = "FileShare"
self.folder_path = folder_path
self.file_name = file_name
self.modified_datetime_start = modified_datetime_start
self.modified_datetime_end = modified_datetime_end
self.format = format
self.file_filter = file_filter
self.compression = compression
|
class FileShareDataset(Dataset):
'''An on-premises file system dataset.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of dataset. Required.
:vartype type: str
:ivar description: Dataset description.
:vartype description: str
:ivar structure: Columns that define the structure of the dataset. Type: array (or Expression
with resultType array), itemType: DatasetDataElement.
:vartype structure: JSON
:ivar schema: Columns that define the physical type schema of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetSchemaDataElement.
:vartype schema: JSON
:ivar linked_service_name: Linked service reference. Required.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar parameters: Parameters for dataset.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the Dataset.
:vartype annotations: list[JSON]
:ivar folder: The folder that this Dataset is in. If not specified, Dataset will appear at the
root level.
:vartype folder: ~azure.mgmt.datafactory.models.DatasetFolder
:ivar folder_path: The path of the on-premises file system. Type: string (or Expression with
resultType string).
:vartype folder_path: JSON
:ivar file_name: The name of the on-premises file system. Type: string (or Expression with
resultType string).
:vartype file_name: JSON
:ivar modified_datetime_start: The start of file's modified datetime. Type: string (or
Expression with resultType string).
:vartype modified_datetime_start: JSON
:ivar modified_datetime_end: The end of file's modified datetime. Type: string (or Expression
with resultType string).
:vartype modified_datetime_end: JSON
:ivar format: The format of the files.
:vartype format: ~azure.mgmt.datafactory.models.DatasetStorageFormat
:ivar file_filter: Specify a filter to be used to select a subset of files in the folderPath
rather than all files. Type: string (or Expression with resultType string).
:vartype file_filter: JSON
:ivar compression: The data compression method used for the file system.
:vartype compression: ~azure.mgmt.datafactory.models.DatasetCompression
'''
def __init__(
self,
*,
linked_service_name: "_models.LinkedServiceReference",
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
structure: Optional[JSON] = None,
schema: Optional[JSON] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
folder: Optional["_models.DatasetFolder"] = None,
folder_path: Optional[JSON] = None,
file_name: Optional[JSON] = None,
modified_datetime_start: Optional[JSON] = None,
modified_datetime_end: Optional[JSON] = None,
format: Optional["_models.DatasetStorageFormat"] = None,
file_filter: Optional[JSON] = None,
compression: Optional["_models.DatasetCompression"] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword description: Dataset description.
:paramtype description: str
:keyword structure: Columns that define the structure of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetDataElement.
:paramtype structure: JSON
:keyword schema: Columns that define the physical type schema of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetSchemaDataElement.
:paramtype schema: JSON
:keyword linked_service_name: Linked service reference. Required.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword parameters: Parameters for dataset.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the Dataset.
:paramtype annotations: list[JSON]
:keyword folder: The folder that this Dataset is in. If not specified, Dataset will appear at
the root level.
:paramtype folder: ~azure.mgmt.datafactory.models.DatasetFolder
:keyword folder_path: The path of the on-premises file system. Type: string (or Expression with
resultType string).
:paramtype folder_path: JSON
:keyword file_name: The name of the on-premises file system. Type: string (or Expression with
resultType string).
:paramtype file_name: JSON
:keyword modified_datetime_start: The start of file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_start: JSON
:keyword modified_datetime_end: The end of file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_end: JSON
:keyword format: The format of the files.
:paramtype format: ~azure.mgmt.datafactory.models.DatasetStorageFormat
:keyword file_filter: Specify a filter to be used to select a subset of files in the folderPath
rather than all files. Type: string (or Expression with resultType string).
:paramtype file_filter: JSON
:keyword compression: The data compression method used for the file system.
:paramtype compression: ~azure.mgmt.datafactory.models.DatasetCompression
'''
pass
| 2 | 2 | 80 | 0 | 39 | 41 | 1 | 1.37 | 1 | 3 | 0 | 0 | 1 | 8 | 1 | 17 | 152 | 5 | 62 | 31 | 41 | 85 | 13 | 12 | 11 | 1 | 3 | 0 | 1 |
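A construction sketch for the FileShareDataset record above. The required linked_service_name is built from LinkedServiceReference, whose type/reference_name keywords are assumed from recent SDK versions (the class is not defined in this record), and "my-file-server" is a hypothetical linked service name.

from azure.mgmt.datafactory.models import FileShareDataset, LinkedServiceReference

dataset = FileShareDataset(
    linked_service_name=LinkedServiceReference(
        type="LinkedServiceReference", reference_name="my-file-server"
    ),
    folder_path="share/inbound",  # plain string stands in for an ADF expression
    file_filter="*.csv",
)
assert dataset.type == "FileShare"  # discriminator set by __init__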
10,846 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FileSystemSink
|
class FileSystemSink(CopySink):
"""A copy activity file system sink.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Copy sink type. Required.
:vartype type: str
:ivar write_batch_size: Write batch size. Type: integer (or Expression with resultType
integer), minimum: 0.
:vartype write_batch_size: JSON
:ivar write_batch_timeout: Write batch timeout. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype write_batch_timeout: JSON
:ivar sink_retry_count: Sink retry count. Type: integer (or Expression with resultType
integer).
:vartype sink_retry_count: JSON
:ivar sink_retry_wait: Sink retry wait. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype sink_retry_wait: JSON
:ivar max_concurrent_connections: The maximum concurrent connection count for the sink data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar copy_behavior: The type of copy behavior for copy sink.
:vartype copy_behavior: JSON
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"write_batch_size": {"key": "writeBatchSize", "type": "object"},
"write_batch_timeout": {"key": "writeBatchTimeout", "type": "object"},
"sink_retry_count": {"key": "sinkRetryCount", "type": "object"},
"sink_retry_wait": {"key": "sinkRetryWait", "type": "object"},
"max_concurrent_connections": {"key": "maxConcurrentConnections", "type": "object"},
"disable_metrics_collection": {"key": "disableMetricsCollection", "type": "object"},
"copy_behavior": {"key": "copyBehavior", "type": "object"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
write_batch_size: Optional[JSON] = None,
write_batch_timeout: Optional[JSON] = None,
sink_retry_count: Optional[JSON] = None,
sink_retry_wait: Optional[JSON] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
copy_behavior: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword write_batch_size: Write batch size. Type: integer (or Expression with resultType
integer), minimum: 0.
:paramtype write_batch_size: JSON
:keyword write_batch_timeout: Write batch timeout. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype write_batch_timeout: JSON
:keyword sink_retry_count: Sink retry count. Type: integer (or Expression with resultType
integer).
:paramtype sink_retry_count: JSON
:keyword sink_retry_wait: Sink retry wait. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype sink_retry_wait: JSON
:keyword max_concurrent_connections: The maximum concurrent connection count for the sink data
store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword copy_behavior: The type of copy behavior for copy sink.
:paramtype copy_behavior: JSON
"""
super().__init__(
additional_properties=additional_properties,
write_batch_size=write_batch_size,
write_batch_timeout=write_batch_timeout,
sink_retry_count=sink_retry_count,
sink_retry_wait=sink_retry_wait,
max_concurrent_connections=max_concurrent_connections,
disable_metrics_collection=disable_metrics_collection,
**kwargs
)
self.type: str = "FileSystemSink"
self.copy_behavior = copy_behavior
|
class FileSystemSink(CopySink):
'''A copy activity file system sink.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Copy sink type. Required.
:vartype type: str
:ivar write_batch_size: Write batch size. Type: integer (or Expression with resultType
integer), minimum: 0.
:vartype write_batch_size: JSON
:ivar write_batch_timeout: Write batch timeout. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype write_batch_timeout: JSON
:ivar sink_retry_count: Sink retry count. Type: integer (or Expression with resultType
integer).
:vartype sink_retry_count: JSON
:ivar sink_retry_wait: Sink retry wait. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype sink_retry_wait: JSON
:ivar max_concurrent_connections: The maximum concurrent connection count for the sink data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar copy_behavior: The type of copy behavior for copy sink.
:vartype copy_behavior: JSON
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
write_batch_size: Optional[JSON] = None,
write_batch_timeout: Optional[JSON] = None,
sink_retry_count: Optional[JSON] = None,
sink_retry_wait: Optional[JSON] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
copy_behavior: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword write_batch_size: Write batch size. Type: integer (or Expression with resultType
integer), minimum: 0.
:paramtype write_batch_size: JSON
:keyword write_batch_timeout: Write batch timeout. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype write_batch_timeout: JSON
:keyword sink_retry_count: Sink retry count. Type: integer (or Expression with resultType
integer).
:paramtype sink_retry_count: JSON
:keyword sink_retry_wait: Sink retry wait. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype sink_retry_wait: JSON
:keyword max_concurrent_connections: The maximum concurrent connection count for the sink data
store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword copy_behavior: The type of copy behavior for copy sink.
:paramtype copy_behavior: JSON
'''
pass
| 2 | 2 | 50 | 0 | 25 | 25 | 1 | 1.32 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 17 | 98 | 5 | 40 | 18 | 26 | 53 | 7 | 6 | 5 | 1 | 3 | 0 | 1 |
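A sketch for the FileSystemSink record above; every keyword is optional per the record, the retry wait follows the timespan pattern documented in the docstring, and the copy_behavior value is hypothetical.

from azure.mgmt.datafactory.models import FileSystemSink

sink = FileSystemSink(
    write_batch_size=1000,
    sink_retry_count=3,
    sink_retry_wait="00:00:30",         # matches the documented timespan pattern
    copy_behavior="FlattenHierarchy",   # hypothetical value
)
assert sink.type == "FileSystemSink"  # discriminator set by __init__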
10,847 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FileSystemSource
|
class FileSystemSource(CopySource):
"""A copy activity file system source.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Copy source type. Required.
:vartype type: str
:ivar source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:vartype source_retry_count: JSON
:ivar source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype source_retry_wait: JSON
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:vartype recursive: JSON
:ivar additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:vartype additional_columns: JSON
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"source_retry_count": {"key": "sourceRetryCount", "type": "object"},
"source_retry_wait": {"key": "sourceRetryWait", "type": "object"},
"max_concurrent_connections": {"key": "maxConcurrentConnections", "type": "object"},
"disable_metrics_collection": {"key": "disableMetricsCollection", "type": "object"},
"recursive": {"key": "recursive", "type": "object"},
"additional_columns": {"key": "additionalColumns", "type": "object"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
source_retry_count: Optional[JSON] = None,
source_retry_wait: Optional[JSON] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
recursive: Optional[JSON] = None,
additional_columns: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:paramtype source_retry_count: JSON
:keyword source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype source_retry_wait: JSON
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:paramtype recursive: JSON
:keyword additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:paramtype additional_columns: JSON
"""
super().__init__(
additional_properties=additional_properties,
source_retry_count=source_retry_count,
source_retry_wait=source_retry_wait,
max_concurrent_connections=max_concurrent_connections,
disable_metrics_collection=disable_metrics_collection,
**kwargs
)
self.type: str = "FileSystemSource"
self.recursive = recursive
self.additional_columns = additional_columns
|
class FileSystemSource(CopySource):
'''A copy activity file system source.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Copy source type. Required.
:vartype type: str
:ivar source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:vartype source_retry_count: JSON
:ivar source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype source_retry_wait: JSON
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:vartype recursive: JSON
:ivar additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:vartype additional_columns: JSON
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
source_retry_count: Optional[JSON] = None,
source_retry_wait: Optional[JSON] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
recursive: Optional[JSON] = None,
additional_columns: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:paramtype source_retry_count: JSON
:keyword source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype source_retry_wait: JSON
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:paramtype recursive: JSON
:keyword additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:paramtype additional_columns: JSON
'''
pass
| 2 | 2 | 46 | 0 | 23 | 23 | 1 | 1.32 | 1 | 3 | 0 | 0 | 1 | 3 | 1 | 17 | 91 | 5 | 37 | 18 | 24 | 49 | 8 | 7 | 6 | 1 | 3 | 0 | 1 |
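A sketch for the FileSystemSource record above; both keywords are optional per the record, and plain Python values stand in for ADF expressions.

from azure.mgmt.datafactory.models import FileSystemSource

source = FileSystemSource(
    recursive=True,                 # read files under the folder path recursively
    max_concurrent_connections=2,
)
assert source.type == "FileSystemSource"  # discriminator set by __init__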
10,848 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FilterActivity
|
class FilterActivity(ControlActivity):
"""Filter and return results from input array based on the conditions.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar items: Input array on which filter should be applied. Required.
:vartype items: ~azure.mgmt.datafactory.models.Expression
:ivar condition: Condition to be used for filtering the input. Required.
:vartype condition: ~azure.mgmt.datafactory.models.Expression
"""
_validation = {
"name": {"required": True},
"type": {"required": True},
"items": {"required": True},
"condition": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"state": {"key": "state", "type": "str"},
"on_inactive_mark_as": {"key": "onInactiveMarkAs", "type": "str"},
"depends_on": {"key": "dependsOn", "type": "[ActivityDependency]"},
"user_properties": {"key": "userProperties", "type": "[UserProperty]"},
"items": {"key": "typeProperties.items", "type": "Expression"},
"condition": {"key": "typeProperties.condition", "type": "Expression"},
}
def __init__(
self,
*,
name: str,
items: "_models.Expression",
condition: "_models.Expression",
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword items: Input array on which filter should be applied. Required.
:paramtype items: ~azure.mgmt.datafactory.models.Expression
:keyword condition: Condition to be used for filtering the input. Required.
:paramtype condition: ~azure.mgmt.datafactory.models.Expression
"""
super().__init__(
additional_properties=additional_properties,
name=name,
description=description,
state=state,
on_inactive_mark_as=on_inactive_mark_as,
depends_on=depends_on,
user_properties=user_properties,
**kwargs
)
self.type: str = "Filter"
self.items = items
self.condition = condition
|
class FilterActivity(ControlActivity):
'''Filter and return results from input array based on the conditions.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar items: Input array on which filter should be applied. Required.
:vartype items: ~azure.mgmt.datafactory.models.Expression
:ivar condition: Condition to be used for filtering the input. Required.
:vartype condition: ~azure.mgmt.datafactory.models.Expression
'''
def __init__(
self,
*,
name: str,
items: "_models.Expression",
condition: "_models.Expression",
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword items: Input array on which filter should be applied. Required.
:paramtype items: ~azure.mgmt.datafactory.models.Expression
:keyword condition: Condition to be used for filtering the input. Required.
:paramtype condition: ~azure.mgmt.datafactory.models.Expression
'''
pass
| 2 | 2 | 51 | 0 | 27 | 24 | 1 | 1.11 | 1 | 3 | 0 | 0 | 1 | 3 | 1 | 18 | 102 | 5 | 46 | 20 | 31 | 51 | 8 | 7 | 6 | 1 | 4 | 0 | 1 |
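A sketch for the FilterActivity record above. The required items and condition take Expression objects, whose value keyword is assumed from the same models namespace (Expression is not defined in this record); the activity name and pipeline expressions are hypothetical.

from azure.mgmt.datafactory.models import Expression, FilterActivity

flt = FilterActivity(
    name="FilterNonEmpty",  # hypothetical activity name
    items=Expression(value="@activity('GetFiles').output.childItems"),
    condition=Expression(value="@greater(length(item().name), 0)"),
)
assert flt.type == "Filter"  # discriminator set by __init__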
10,849 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.Flowlet
|
class Flowlet(DataFlow):
"""Data flow flowlet.
All required parameters must be populated in order to send to server.
:ivar type: Type of data flow. Required.
:vartype type: str
:ivar description: The description of the data flow.
:vartype description: str
:ivar annotations: List of tags that can be used for describing the data flow.
:vartype annotations: list[JSON]
:ivar folder: The folder that this data flow is in. If not specified, Data flow will appear at
the root level.
:vartype folder: ~azure.mgmt.datafactory.models.DataFlowFolder
:ivar sources: List of sources in Flowlet.
:vartype sources: list[~azure.mgmt.datafactory.models.DataFlowSource]
:ivar sinks: List of sinks in Flowlet.
:vartype sinks: list[~azure.mgmt.datafactory.models.DataFlowSink]
:ivar transformations: List of transformations in Flowlet.
:vartype transformations: list[~azure.mgmt.datafactory.models.Transformation]
:ivar script: Flowlet script.
:vartype script: str
:ivar script_lines: Flowlet script lines.
:vartype script_lines: list[str]
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"annotations": {"key": "annotations", "type": "[object]"},
"folder": {"key": "folder", "type": "DataFlowFolder"},
"sources": {"key": "typeProperties.sources", "type": "[DataFlowSource]"},
"sinks": {"key": "typeProperties.sinks", "type": "[DataFlowSink]"},
"transformations": {"key": "typeProperties.transformations", "type": "[Transformation]"},
"script": {"key": "typeProperties.script", "type": "str"},
"script_lines": {"key": "typeProperties.scriptLines", "type": "[str]"},
}
def __init__(
self,
*,
description: Optional[str] = None,
annotations: Optional[List[JSON]] = None,
folder: Optional["_models.DataFlowFolder"] = None,
sources: Optional[List["_models.DataFlowSource"]] = None,
sinks: Optional[List["_models.DataFlowSink"]] = None,
transformations: Optional[List["_models.Transformation"]] = None,
script: Optional[str] = None,
script_lines: Optional[List[str]] = None,
**kwargs: Any
) -> None:
"""
:keyword description: The description of the data flow.
:paramtype description: str
:keyword annotations: List of tags that can be used for describing the data flow.
:paramtype annotations: list[JSON]
:keyword folder: The folder that this data flow is in. If not specified, Data flow will appear
at the root level.
:paramtype folder: ~azure.mgmt.datafactory.models.DataFlowFolder
:keyword sources: List of sources in Flowlet.
:paramtype sources: list[~azure.mgmt.datafactory.models.DataFlowSource]
:keyword sinks: List of sinks in Flowlet.
:paramtype sinks: list[~azure.mgmt.datafactory.models.DataFlowSink]
:keyword transformations: List of transformations in Flowlet.
:paramtype transformations: list[~azure.mgmt.datafactory.models.Transformation]
:keyword script: Flowlet script.
:paramtype script: str
:keyword script_lines: Flowlet script lines.
:paramtype script_lines: list[str]
"""
super().__init__(description=description, annotations=annotations, folder=folder, **kwargs)
self.type: str = "Flowlet"
self.sources = sources
self.sinks = sinks
self.transformations = transformations
self.script = script
self.script_lines = script_lines
|
class Flowlet(DataFlow):
'''Data flow flowlet.
All required parameters must be populated in order to send to server.
:ivar type: Type of data flow. Required.
:vartype type: str
:ivar description: The description of the data flow.
:vartype description: str
:ivar annotations: List of tags that can be used for describing the data flow.
:vartype annotations: list[JSON]
:ivar folder: The folder that this data flow is in. If not specified, Data flow will appear at
the root level.
:vartype folder: ~azure.mgmt.datafactory.models.DataFlowFolder
:ivar sources: List of sources in Flowlet.
:vartype sources: list[~azure.mgmt.datafactory.models.DataFlowSource]
:ivar sinks: List of sinks in Flowlet.
:vartype sinks: list[~azure.mgmt.datafactory.models.DataFlowSink]
:ivar transformations: List of transformations in Flowlet.
:vartype transformations: list[~azure.mgmt.datafactory.models.Transformation]
:ivar script: Flowlet script.
:vartype script: str
:ivar script_lines: Flowlet script lines.
:vartype script_lines: list[str]
'''
def __init__(
self,
*,
description: Optional[str] = None,
annotations: Optional[List[JSON]] = None,
folder: Optional["_models.DataFlowFolder"] = None,
sources: Optional[List["_models.DataFlowSource"]] = None,
sinks: Optional[List["_models.DataFlowSink"]] = None,
transformations: Optional[List["_models.Transformation"]] = None,
script: Optional[str] = None,
script_lines: Optional[List[str]] = None,
**kwargs: Any
) -> None:
'''
:keyword description: The description of the data flow.
:paramtype description: str
:keyword annotations: List of tags that can be used for describing the data flow.
:paramtype annotations: list[JSON]
:keyword folder: The folder that this data flow is in. If not specified, Data flow will appear
at the root level.
:paramtype folder: ~azure.mgmt.datafactory.models.DataFlowFolder
:keyword sources: List of sources in Flowlet.
:paramtype sources: list[~azure.mgmt.datafactory.models.DataFlowSource]
:keyword sinks: List of sinks in Flowlet.
:paramtype sinks: list[~azure.mgmt.datafactory.models.DataFlowSink]
:keyword transformations: List of transformations in Flowlet.
:paramtype transformations: list[~azure.mgmt.datafactory.models.Transformation]
:keyword script: Flowlet script.
:paramtype script: str
:keyword script_lines: Flowlet script lines.
:paramtype script_lines: list[str]
'''
pass
| 2 | 2 | 39 | 0 | 20 | 19 | 1 | 1.17 | 1 | 3 | 0 | 0 | 1 | 6 | 1 | 17 | 81 | 5 | 35 | 22 | 21 | 41 | 11 | 10 | 9 | 1 | 3 | 0 | 1 |
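A sketch for the Flowlet record above; every keyword is optional per the record, and the script line is an illustrative data flow script fragment.

from azure.mgmt.datafactory.models import Flowlet

flowlet = Flowlet(
    description="Reusable cleansing steps",
    script_lines=[
        "source(output(id as integer)) ~> src",  # illustrative script line
    ],
)
assert flowlet.type == "Flowlet"  # discriminator set by __init__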
10,850 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.ForEachActivity
|
class ForEachActivity(ControlActivity):
"""This activity is used for iterating over a collection and execute given activities.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar is_sequential: Should the loop be executed in sequence or in parallel (max 50).
:vartype is_sequential: bool
:ivar batch_count: Batch count to be used for controlling the number of parallel executions
(when isSequential is set to false).
:vartype batch_count: int
:ivar items: Collection to iterate. Required.
:vartype items: ~azure.mgmt.datafactory.models.Expression
:ivar activities: List of activities to execute. Required.
:vartype activities: list[~azure.mgmt.datafactory.models.Activity]
"""
_validation = {
"name": {"required": True},
"type": {"required": True},
"batch_count": {"maximum": 50},
"items": {"required": True},
"activities": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"state": {"key": "state", "type": "str"},
"on_inactive_mark_as": {"key": "onInactiveMarkAs", "type": "str"},
"depends_on": {"key": "dependsOn", "type": "[ActivityDependency]"},
"user_properties": {"key": "userProperties", "type": "[UserProperty]"},
"is_sequential": {"key": "typeProperties.isSequential", "type": "bool"},
"batch_count": {"key": "typeProperties.batchCount", "type": "int"},
"items": {"key": "typeProperties.items", "type": "Expression"},
"activities": {"key": "typeProperties.activities", "type": "[Activity]"},
}
def __init__(
self,
*,
name: str,
items: "_models.Expression",
activities: List["_models.Activity"],
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
is_sequential: Optional[bool] = None,
batch_count: Optional[int] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword is_sequential: Should the loop be executed in sequence or in parallel (max 50).
:paramtype is_sequential: bool
:keyword batch_count: Batch count to be used for controlling the number of parallel executions
(when isSequential is set to false).
:paramtype batch_count: int
:keyword items: Collection to iterate. Required.
:paramtype items: ~azure.mgmt.datafactory.models.Expression
:keyword activities: List of activities to execute. Required.
:paramtype activities: list[~azure.mgmt.datafactory.models.Activity]
"""
super().__init__(
additional_properties=additional_properties,
name=name,
description=description,
state=state,
on_inactive_mark_as=on_inactive_mark_as,
depends_on=depends_on,
user_properties=user_properties,
**kwargs
)
self.type: str = "ForEach"
self.is_sequential = is_sequential
self.batch_count = batch_count
self.items = items
self.activities = activities
|
class ForEachActivity(ControlActivity):
'''This activity is used for iterating over a collection and executing the given activities.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar is_sequential: Should the loop be executed in sequence or in parallel (max 50).
:vartype is_sequential: bool
:ivar batch_count: Batch count to be used for controlling the number of parallel executions
(when isSequential is set to false).
:vartype batch_count: int
:ivar items: Collection to iterate. Required.
:vartype items: ~azure.mgmt.datafactory.models.Expression
:ivar activities: List of activities to execute. Required.
:vartype activities: list[~azure.mgmt.datafactory.models.Activity]
'''
def __init__(
self,
*,
name: str,
items: "_models.Expression",
activities: List["_models.Activity"],
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
is_sequential: Optional[bool] = None,
batch_count: Optional[int] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword is_sequential: Should the loop be executed in sequence or in parallel (max 50).
:paramtype is_sequential: bool
:keyword batch_count: Batch count to be used for controlling the number of parallel executions
(when isSequential is set to false).
:paramtype batch_count: int
:keyword items: Collection to iterate. Required.
:paramtype items: ~azure.mgmt.datafactory.models.Expression
:keyword activities: List of activities to execute. Required.
:paramtype activities: list[~azure.mgmt.datafactory.models.Activity]
'''
pass
| 2 | 2 | 60 | 0 | 31 | 29 | 1 | 1.15 | 1 | 5 | 0 | 0 | 1 | 5 | 1 | 18 | 119 | 5 | 53 | 24 | 36 | 61 | 10 | 9 | 8 | 1 | 4 | 0 | 1 |
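A sketch for the ForEachActivity record above. WaitActivity and Expression are assumed from the same models namespace as a simple inner activity and expression wrapper (neither is defined in this record); the names are hypothetical, and batch_count respects the documented maximum of 50.

from azure.mgmt.datafactory.models import Expression, ForEachActivity, WaitActivity

loop = ForEachActivity(
    name="ProcessEachFile",  # hypothetical activity name
    items=Expression(value="@pipeline().parameters.files"),
    is_sequential=False,
    batch_count=10,  # validated against the documented maximum of 50
    activities=[WaitActivity(name="Pause", wait_time_in_seconds=1)],
)
assert loop.type == "ForEach"  # discriminator set by __init__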
10,851 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FormatReadSettings
|
class FormatReadSettings(_serialization.Model):
"""Format read settings.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
BinaryReadSettings, DelimitedTextReadSettings, JsonReadSettings, ParquetReadSettings,
XmlReadSettings
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The read setting type. Required.
:vartype type: str
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
}
_subtype_map = {
"type": {
"BinaryReadSettings": "BinaryReadSettings",
"DelimitedTextReadSettings": "DelimitedTextReadSettings",
"JsonReadSettings": "JsonReadSettings",
"ParquetReadSettings": "ParquetReadSettings",
"XmlReadSettings": "XmlReadSettings",
}
}
def __init__(self, *, additional_properties: Optional[Dict[str, JSON]] = None, **kwargs: Any) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
"""
super().__init__(**kwargs)
self.additional_properties = additional_properties
self.type: Optional[str] = None
|
class FormatReadSettings(_serialization.Model):
'''Format read settings.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
BinaryReadSettings, DelimitedTextReadSettings, JsonReadSettings, ParquetReadSettings,
XmlReadSettings
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The read setting type. Required.
:vartype type: str
'''
def __init__(self, *, additional_properties: Optional[Dict[str, JSON]] = None, **kwargs: Any) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
'''
pass
| 2 | 2 | 9 | 0 | 4 | 5 | 1 | 0.76 | 1 | 3 | 0 | 5 | 1 | 2 | 1 | 16 | 44 | 7 | 21 | 7 | 19 | 16 | 8 | 7 | 6 | 1 | 2 | 0 | 1 |
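
FormatReadSettings (like FormatWriteSettings in the next record) is an abstract discriminator base: _subtype_map routes deserialization to the subclass named by the "type" field. A sketch of that dispatch, assuming the deserialize classmethod these generated models inherit from _serialization.Model:

from azure.mgmt.datafactory.models import FormatReadSettings

# The "type" discriminator picks the concrete subclass at deserialization time.
raw = {"type": "JsonReadSettings"}
settings = FormatReadSettings.deserialize(raw)
print(type(settings).__name__)  # JsonReadSettings
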
10,852 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FormatWriteSettings
|
class FormatWriteSettings(_serialization.Model):
"""Format write settings.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
AvroWriteSettings, DelimitedTextWriteSettings, IcebergWriteSettings, JsonWriteSettings,
OrcWriteSettings, ParquetWriteSettings
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The write setting type. Required.
:vartype type: str
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
}
_subtype_map = {
"type": {
"AvroWriteSettings": "AvroWriteSettings",
"DelimitedTextWriteSettings": "DelimitedTextWriteSettings",
"IcebergWriteSettings": "IcebergWriteSettings",
"JsonWriteSettings": "JsonWriteSettings",
"OrcWriteSettings": "OrcWriteSettings",
"ParquetWriteSettings": "ParquetWriteSettings",
}
}
def __init__(self, *, additional_properties: Optional[Dict[str, JSON]] = None, **kwargs: Any) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
"""
super().__init__(**kwargs)
self.additional_properties = additional_properties
self.type: Optional[str] = None
|
class FormatWriteSettings(_serialization.Model):
'''Format write settings.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
AvroWriteSettings, DelimitedTextWriteSettings, IcebergWriteSettings, JsonWriteSettings,
OrcWriteSettings, ParquetWriteSettings
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The write setting type. Required.
:vartype type: str
'''
def __init__(self, *, additional_properties: Optional[Dict[str, JSON]] = None, **kwargs: Any) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
'''
pass
| 2 | 2 | 9 | 0 | 4 | 5 | 1 | 0.73 | 1 | 3 | 0 | 6 | 1 | 2 | 1 | 16 | 45 | 7 | 22 | 7 | 20 | 16 | 8 | 7 | 6 | 1 | 2 | 0 | 1 |
10,853 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FtpReadSettings
|
class FtpReadSettings(StoreReadSettings):
"""Ftp read settings.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The read setting type. Required.
:vartype type: str
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:vartype recursive: JSON
:ivar wildcard_folder_path: Ftp wildcardFolderPath. Type: string (or Expression with resultType
string).
:vartype wildcard_folder_path: JSON
:ivar wildcard_file_name: Ftp wildcardFileName. Type: string (or Expression with resultType
string).
:vartype wildcard_file_name: JSON
:ivar enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:vartype enable_partition_discovery: JSON
:ivar partition_root_path: Specify the root path where partition discovery starts from. Type:
string (or Expression with resultType string).
:vartype partition_root_path: JSON
:ivar delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:vartype delete_files_after_completion: JSON
:ivar file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:vartype file_list_path: JSON
:ivar use_binary_transfer: Specify whether to use binary transfer mode for FTP stores. Type:
boolean (or Expression with resultType boolean).
:vartype use_binary_transfer: JSON
:ivar disable_chunking: If true, disable parallel reading within each file. Default is false.
Type: boolean (or Expression with resultType boolean).
:vartype disable_chunking: JSON
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"max_concurrent_connections": {"key": "maxConcurrentConnections", "type": "object"},
"disable_metrics_collection": {"key": "disableMetricsCollection", "type": "object"},
"recursive": {"key": "recursive", "type": "object"},
"wildcard_folder_path": {"key": "wildcardFolderPath", "type": "object"},
"wildcard_file_name": {"key": "wildcardFileName", "type": "object"},
"enable_partition_discovery": {"key": "enablePartitionDiscovery", "type": "object"},
"partition_root_path": {"key": "partitionRootPath", "type": "object"},
"delete_files_after_completion": {"key": "deleteFilesAfterCompletion", "type": "object"},
"file_list_path": {"key": "fileListPath", "type": "object"},
"use_binary_transfer": {"key": "useBinaryTransfer", "type": "object"},
"disable_chunking": {"key": "disableChunking", "type": "object"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
recursive: Optional[JSON] = None,
wildcard_folder_path: Optional[JSON] = None,
wildcard_file_name: Optional[JSON] = None,
enable_partition_discovery: Optional[JSON] = None,
partition_root_path: Optional[JSON] = None,
delete_files_after_completion: Optional[JSON] = None,
file_list_path: Optional[JSON] = None,
use_binary_transfer: Optional[JSON] = None,
disable_chunking: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:paramtype recursive: JSON
:keyword wildcard_folder_path: Ftp wildcardFolderPath. Type: string (or Expression with
resultType string).
:paramtype wildcard_folder_path: JSON
:keyword wildcard_file_name: Ftp wildcardFileName. Type: string (or Expression with resultType
string).
:paramtype wildcard_file_name: JSON
:keyword enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:paramtype enable_partition_discovery: JSON
:keyword partition_root_path: Specify the root path where partition discovery starts from.
Type: string (or Expression with resultType string).
:paramtype partition_root_path: JSON
:keyword delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:paramtype delete_files_after_completion: JSON
:keyword file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:paramtype file_list_path: JSON
:keyword use_binary_transfer: Specify whether to use binary transfer mode for FTP stores. Type:
boolean (or Expression with resultType boolean).
:paramtype use_binary_transfer: JSON
:keyword disable_chunking: If true, disable parallel reading within each file. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_chunking: JSON
"""
super().__init__(
additional_properties=additional_properties,
max_concurrent_connections=max_concurrent_connections,
disable_metrics_collection=disable_metrics_collection,
**kwargs
)
self.type: str = "FtpReadSettings"
self.recursive = recursive
self.wildcard_folder_path = wildcard_folder_path
self.wildcard_file_name = wildcard_file_name
self.enable_partition_discovery = enable_partition_discovery
self.partition_root_path = partition_root_path
self.delete_files_after_completion = delete_files_after_completion
self.file_list_path = file_list_path
self.use_binary_transfer = use_binary_transfer
self.disable_chunking = disable_chunking
|
class FtpReadSettings(StoreReadSettings):
'''Ftp read settings.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The read setting type. Required.
:vartype type: str
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:vartype recursive: JSON
:ivar wildcard_folder_path: Ftp wildcardFolderPath. Type: string (or Expression with resultType
string).
:vartype wildcard_folder_path: JSON
:ivar wildcard_file_name: Ftp wildcardFileName. Type: string (or Expression with resultType
string).
:vartype wildcard_file_name: JSON
:ivar enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:vartype enable_partition_discovery: JSON
:ivar partition_root_path: Specify the root path where partition discovery starts from. Type:
string (or Expression with resultType string).
:vartype partition_root_path: JSON
:ivar delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:vartype delete_files_after_completion: JSON
:ivar file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:vartype file_list_path: JSON
:ivar use_binary_transfer: Specify whether to use binary transfer mode for FTP stores. Type:
boolean (or Expression with resultType boolean).
:vartype use_binary_transfer: JSON
:ivar disable_chunking: If true, disable parallel reading within each file. Default is false.
Type: boolean (or Expression with resultType boolean).
:vartype disable_chunking: JSON
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
recursive: Optional[JSON] = None,
wildcard_folder_path: Optional[JSON] = None,
wildcard_file_name: Optional[JSON] = None,
enable_partition_discovery: Optional[JSON] = None,
partition_root_path: Optional[JSON] = None,
delete_files_after_completion: Optional[JSON] = None,
file_list_path: Optional[JSON] = None,
use_binary_transfer: Optional[JSON] = None,
disable_chunking: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:paramtype recursive: JSON
:keyword wildcard_folder_path: Ftp wildcardFolderPath. Type: string (or Expression with
resultType string).
:paramtype wildcard_folder_path: JSON
:keyword wildcard_file_name: Ftp wildcardFileName. Type: string (or Expression with resultType
string).
:paramtype wildcard_file_name: JSON
:keyword enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:paramtype enable_partition_discovery: JSON
:keyword partition_root_path: Specify the root path where partition discovery starts from.
Type: string (or Expression with resultType string).
:paramtype partition_root_path: JSON
:keyword delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:paramtype delete_files_after_completion: JSON
:keyword file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:paramtype file_list_path: JSON
:keyword use_binary_transfer: Specify whether to use binary transfer mode for FTP stores. Type:
boolean (or Expression with resultType boolean).
:paramtype use_binary_transfer: JSON
:keyword disable_chunking: If true, disable parallel reading within each file. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_chunking: JSON
'''
pass
| 2 | 2 | 72 | 0 | 33 | 39 | 1 | 1.56 | 1 | 3 | 0 | 0 | 1 | 10 | 1 | 17 | 138 | 5 | 52 | 30 | 34 | 81 | 15 | 14 | 13 | 1 | 3 | 0 | 1 |
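
A construction sketch for the read settings above; every JSON-typed field accepts either a literal or an ADF expression object, and the values here are illustrative:

from azure.mgmt.datafactory.models import FtpReadSettings

read_settings = FtpReadSettings(
    recursive=True,
    wildcard_folder_path="incoming",   # illustrative folder
    wildcard_file_name="*.csv",
    use_binary_transfer=True,
    disable_chunking=False,
)
# The constructor pins the discriminator: read_settings.type == "FtpReadSettings"
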
10,854 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FtpServerLinkedService
|
class FtpServerLinkedService(LinkedService):
"""A FTP server Linked Service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar host: Host name of the FTP server. Type: string (or Expression with resultType string).
Required.
:vartype host: JSON
:ivar port: The TCP port number that the FTP server uses to listen for client connections.
Default value is 21. Type: integer (or Expression with resultType integer), minimum: 0.
:vartype port: JSON
:ivar authentication_type: The authentication type to be used to connect to the FTP server.
Known values are: "Basic" and "Anonymous".
:vartype authentication_type: str or ~azure.mgmt.datafactory.models.FtpAuthenticationType
:ivar user_name: Username to log on to the FTP server. Type: string (or Expression with resultType
string).
:vartype user_name: JSON
:ivar password: Password to log on to the FTP server.
:vartype password: ~azure.mgmt.datafactory.models.SecretBase
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
:ivar enable_ssl: If true, connect to the FTP server over SSL/TLS channel. Default value is
true. Type: boolean (or Expression with resultType boolean).
:vartype enable_ssl: JSON
:ivar enable_server_certificate_validation: If true, validate the FTP server SSL certificate
when connecting over SSL/TLS channel. Default value is true. Type: boolean (or Expression with
resultType boolean).
:vartype enable_server_certificate_validation: JSON
"""
_validation = {
"type": {"required": True},
"host": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"version": {"key": "version", "type": "str"},
"connect_via": {"key": "connectVia", "type": "IntegrationRuntimeReference"},
"description": {"key": "description", "type": "str"},
"parameters": {"key": "parameters", "type": "{ParameterSpecification}"},
"annotations": {"key": "annotations", "type": "[object]"},
"host": {"key": "typeProperties.host", "type": "object"},
"port": {"key": "typeProperties.port", "type": "object"},
"authentication_type": {"key": "typeProperties.authenticationType", "type": "str"},
"user_name": {"key": "typeProperties.userName", "type": "object"},
"password": {"key": "typeProperties.password", "type": "SecretBase"},
"encrypted_credential": {"key": "typeProperties.encryptedCredential", "type": "str"},
"enable_ssl": {"key": "typeProperties.enableSsl", "type": "object"},
"enable_server_certificate_validation": {
"key": "typeProperties.enableServerCertificateValidation",
"type": "object",
},
}
def __init__(
self,
*,
host: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
port: Optional[JSON] = None,
authentication_type: Optional[Union[str, "_models.FtpAuthenticationType"]] = None,
user_name: Optional[JSON] = None,
password: Optional["_models.SecretBase"] = None,
encrypted_credential: Optional[str] = None,
enable_ssl: Optional[JSON] = None,
enable_server_certificate_validation: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword host: Host name of the FTP server. Type: string (or Expression with resultType
string). Required.
:paramtype host: JSON
:keyword port: The TCP port number that the FTP server uses to listen for client connections.
Default value is 21. Type: integer (or Expression with resultType integer), minimum: 0.
:paramtype port: JSON
:keyword authentication_type: The authentication type to be used to connect to the FTP server.
Known values are: "Basic" and "Anonymous".
:paramtype authentication_type: str or ~azure.mgmt.datafactory.models.FtpAuthenticationType
:keyword user_name: Username to log on to the FTP server. Type: string (or Expression with
resultType string).
:paramtype user_name: JSON
:keyword password: Password to log on to the FTP server.
:paramtype password: ~azure.mgmt.datafactory.models.SecretBase
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
:keyword enable_ssl: If true, connect to the FTP server over SSL/TLS channel. Default value is
true. Type: boolean (or Expression with resultType boolean).
:paramtype enable_ssl: JSON
:keyword enable_server_certificate_validation: If true, validate the FTP server SSL certificate
when connecting over SSL/TLS channel. Default value is true. Type: boolean (or Expression with
resultType boolean).
:paramtype enable_server_certificate_validation: JSON
"""
super().__init__(
additional_properties=additional_properties,
version=version,
connect_via=connect_via,
description=description,
parameters=parameters,
annotations=annotations,
**kwargs
)
self.type: str = "FtpServer"
self.host = host
self.port = port
self.authentication_type = authentication_type
self.user_name = user_name
self.password = password
self.encrypted_credential = encrypted_credential
self.enable_ssl = enable_ssl
self.enable_server_certificate_validation = enable_server_certificate_validation
|
class FtpServerLinkedService(LinkedService):
'''An FTP server Linked Service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar host: Host name of the FTP server. Type: string (or Expression with resultType string).
Required.
:vartype host: JSON
:ivar port: The TCP port number that the FTP server uses to listen for client connections.
Default value is 21. Type: integer (or Expression with resultType integer), minimum: 0.
:vartype port: JSON
:ivar authentication_type: The authentication type to be used to connect to the FTP server.
Known values are: "Basic" and "Anonymous".
:vartype authentication_type: str or ~azure.mgmt.datafactory.models.FtpAuthenticationType
:ivar user_name: Username to log on to the FTP server. Type: string (or Expression with resultType
string).
:vartype user_name: JSON
:ivar password: Password to log on to the FTP server.
:vartype password: ~azure.mgmt.datafactory.models.SecretBase
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
:ivar enable_ssl: If true, connect to the FTP server over SSL/TLS channel. Default value is
true. Type: boolean (or Expression with resultType boolean).
:vartype enable_ssl: JSON
:ivar enable_server_certificate_validation: If true, validate the FTP server SSL certificate
when connecting over SSL/TLS channel. Default value is true. Type: boolean (or Expression with
resultType boolean).
:vartype enable_server_certificate_validation: JSON
'''
def __init__(
self,
*,
host: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
port: Optional[JSON] = None,
authentication_type: Optional[Union[str, "_models.FtpAuthenticationType"]] = None,
user_name: Optional[JSON] = None,
password: Optional["_models.SecretBase"] = None,
encrypted_credential: Optional[str] = None,
enable_ssl: Optional[JSON] = None,
enable_server_certificate_validation: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword host: Host name of the FTP server. Type: string (or Expression with resultType
string). Required.
:paramtype host: JSON
:keyword port: The TCP port number that the FTP server uses to listen for client connections.
Default value is 21. Type: integer (or Expression with resultType integer), minimum: 0.
:paramtype port: JSON
:keyword authentication_type: The authentication type to be used to connect to the FTP server.
Known values are: "Basic" and "Anonymous".
:paramtype authentication_type: str or ~azure.mgmt.datafactory.models.FtpAuthenticationType
:keyword user_name: Username to log on to the FTP server. Type: string (or Expression with
resultType string).
:paramtype user_name: JSON
:keyword password: Password to log on to the FTP server.
:paramtype password: ~azure.mgmt.datafactory.models.SecretBase
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
:keyword enable_ssl: If true, connect to the FTP server over SSL/TLS channel. Default value is
true. Type: boolean (or Expression with resultType boolean).
:paramtype enable_ssl: JSON
:keyword enable_server_certificate_validation: If true, validate the FTP server SSL certificate
when connecting over SSL/TLS channel. Default value is true. Type: boolean (or Expression with
resultType boolean).
:paramtype enable_server_certificate_validation: JSON
'''
pass
| 2 | 2 | 76 | 0 | 37 | 39 | 1 | 1.31 | 1 | 3 | 0 | 0 | 1 | 9 | 1 | 17 | 148 | 5 | 62 | 31 | 42 | 81 | 14 | 13 | 12 | 1 | 3 | 0 | 1 |
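
A construction sketch for the linked service above; it assumes SecureString (a SecretBase subclass from the same models namespace) for the password, and the host and credentials are placeholders:

from azure.mgmt.datafactory.models import FtpServerLinkedService, SecureString

ftp_ls = FtpServerLinkedService(
    host="ftp.example.com",                   # placeholder host
    port=21,
    authentication_type="Basic",
    user_name="deploy",                       # placeholder user
    password=SecureString(value="<secret>"),  # placeholder secret
    enable_ssl=True,
    enable_server_certificate_validation=True,
)
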
10,855 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FtpServerLocation
|
class FtpServerLocation(DatasetLocation):
"""The location of ftp server dataset.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of dataset storage location. Required.
:vartype type: str
:ivar folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:vartype folder_path: JSON
:ivar file_name: Specify the file name of dataset. Type: string (or Expression with resultType
string).
:vartype file_name: JSON
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"folder_path": {"key": "folderPath", "type": "object"},
"file_name": {"key": "fileName", "type": "object"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
folder_path: Optional[JSON] = None,
file_name: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:paramtype folder_path: JSON
:keyword file_name: Specify the file name of dataset. Type: string (or Expression with
resultType string).
:paramtype file_name: JSON
"""
super().__init__(
additional_properties=additional_properties, folder_path=folder_path, file_name=file_name, **kwargs
)
self.type: str = "FtpServerLocation"
|
class FtpServerLocation(DatasetLocation):
'''The location of an FTP server dataset.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of dataset storage location. Required.
:vartype type: str
:ivar folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:vartype folder_path: JSON
:ivar file_name: Specify the file name of dataset. Type: string (or Expression with resultType
string).
:vartype file_name: JSON
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
folder_path: Optional[JSON] = None,
file_name: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:paramtype folder_path: JSON
:keyword file_name: Specify the file name of dataset. Type: string (or Expression with
resultType string).
:paramtype file_name: JSON
'''
pass
| 2 | 2 | 23 | 0 | 12 | 11 | 1 | 1.14 | 1 | 3 | 0 | 0 | 1 | 1 | 1 | 17 | 52 | 5 | 22 | 12 | 13 | 25 | 6 | 5 | 4 | 1 | 3 | 0 | 1 |
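
The matching dataset location is a one-liner; paths are illustrative:

from azure.mgmt.datafactory.models import FtpServerLocation

location = FtpServerLocation(folder_path="incoming/reports", file_name="latest.csv")
# The constructor pins location.type to "FtpServerLocation"
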
10,856 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.GetDataFactoryOperationStatusResponse
|
class GetDataFactoryOperationStatusResponse(_serialization.Model):
"""Response body structure for get data factory operation status.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar status: Status of the operation.
:vartype status: str
"""
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"status": {"key": "status", "type": "str"},
}
def __init__(
self, *, additional_properties: Optional[Dict[str, JSON]] = None, status: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword status: Status of the operation.
:paramtype status: str
"""
super().__init__(**kwargs)
self.additional_properties = additional_properties
self.status = status
|
class GetDataFactoryOperationStatusResponse(_serialization.Model):
'''Response body structure for get data factory operation status.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar status: Status of the operation.
:vartype status: str
'''
def __init__(
self, *, additional_properties: Optional[Dict[str, JSON]] = None, status: Optional[str] = None, **kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword status: Status of the operation.
:paramtype status: str
'''
pass
| 2 | 2 | 13 | 0 | 6 | 7 | 1 | 1.27 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 16 | 28 | 3 | 11 | 7 | 7 | 14 | 6 | 5 | 4 | 1 | 2 | 0 | 1 |
10,857 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FactoryVSTSConfiguration
|
class FactoryVSTSConfiguration(FactoryRepoConfiguration):
"""Factory's VSTS repo information.
All required parameters must be populated in order to send to server.
:ivar type: Type of repo configuration. Required.
:vartype type: str
:ivar account_name: Account name. Required.
:vartype account_name: str
:ivar repository_name: Repository name. Required.
:vartype repository_name: str
:ivar collaboration_branch: Collaboration branch. Required.
:vartype collaboration_branch: str
:ivar root_folder: Root folder. Required.
:vartype root_folder: str
:ivar last_commit_id: Last commit id.
:vartype last_commit_id: str
:ivar disable_publish: Disable manual publish operation in ADF studio to favor automated
publish.
:vartype disable_publish: bool
:ivar project_name: VSTS project name. Required.
:vartype project_name: str
:ivar tenant_id: VSTS tenant id.
:vartype tenant_id: str
"""
_validation = {
"type": {"required": True},
"account_name": {"required": True},
"repository_name": {"required": True},
"collaboration_branch": {"required": True},
"root_folder": {"required": True},
"project_name": {"required": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"account_name": {"key": "accountName", "type": "str"},
"repository_name": {"key": "repositoryName", "type": "str"},
"collaboration_branch": {"key": "collaborationBranch", "type": "str"},
"root_folder": {"key": "rootFolder", "type": "str"},
"last_commit_id": {"key": "lastCommitId", "type": "str"},
"disable_publish": {"key": "disablePublish", "type": "bool"},
"project_name": {"key": "projectName", "type": "str"},
"tenant_id": {"key": "tenantId", "type": "str"},
}
def __init__(
self,
*,
account_name: str,
repository_name: str,
collaboration_branch: str,
root_folder: str,
project_name: str,
last_commit_id: Optional[str] = None,
disable_publish: Optional[bool] = None,
tenant_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword account_name: Account name. Required.
:paramtype account_name: str
:keyword repository_name: Repository name. Required.
:paramtype repository_name: str
:keyword collaboration_branch: Collaboration branch. Required.
:paramtype collaboration_branch: str
:keyword root_folder: Root folder. Required.
:paramtype root_folder: str
:keyword last_commit_id: Last commit id.
:paramtype last_commit_id: str
:keyword disable_publish: Disable manual publish operation in ADF studio to favor automated
publish.
:paramtype disable_publish: bool
:keyword project_name: VSTS project name. Required.
:paramtype project_name: str
:keyword tenant_id: VSTS tenant id.
:paramtype tenant_id: str
"""
super().__init__(
account_name=account_name,
repository_name=repository_name,
collaboration_branch=collaboration_branch,
root_folder=root_folder,
last_commit_id=last_commit_id,
disable_publish=disable_publish,
**kwargs
)
self.type: str = "FactoryVSTSConfiguration"
self.project_name = project_name
self.tenant_id = tenant_id
|
class FactoryVSTSConfiguration(FactoryRepoConfiguration):
'''Factory's VSTS repo information.
All required parameters must be populated in order to send to server.
:ivar type: Type of repo configuration. Required.
:vartype type: str
:ivar account_name: Account name. Required.
:vartype account_name: str
:ivar repository_name: Repository name. Required.
:vartype repository_name: str
:ivar collaboration_branch: Collaboration branch. Required.
:vartype collaboration_branch: str
:ivar root_folder: Root folder. Required.
:vartype root_folder: str
:ivar last_commit_id: Last commit id.
:vartype last_commit_id: str
:ivar disable_publish: Disable manual publish operation in ADF studio to favor automated
publish.
:vartype disable_publish: bool
:ivar project_name: VSTS project name. Required.
:vartype project_name: str
:ivar tenant_id: VSTS tenant id.
:vartype tenant_id: str
'''
def __init__(
self,
*,
account_name: str,
repository_name: str,
collaboration_branch: str,
root_folder: str,
project_name: str,
last_commit_id: Optional[str] = None,
disable_publish: Optional[bool] = None,
tenant_id: Optional[str] = None,
**kwargs: Any
) -> None:
'''
:keyword account_name: Account name. Required.
:paramtype account_name: str
:keyword repository_name: Repository name. Required.
:paramtype repository_name: str
:keyword collaboration_branch: Collaboration branch. Required.
:paramtype collaboration_branch: str
:keyword root_folder: Root folder. Required.
:paramtype root_folder: str
:keyword last_commit_id: Last commit id.
:paramtype last_commit_id: str
:keyword disable_publish: Disable manual publish operation in ADF studio to favor automated
publish.
:paramtype disable_publish: bool
:keyword project_name: VSTS project name. Required.
:paramtype project_name: str
:keyword tenant_id: VSTS tenant id.
:paramtype tenant_id: str
'''
pass
| 2 | 2 | 44 | 0 | 25 | 19 | 1 | 0.91 | 1 | 4 | 0 | 0 | 1 | 3 | 1 | 17 | 91 | 5 | 45 | 19 | 31 | 41 | 8 | 7 | 6 | 1 | 3 | 0 | 1 |
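
A construction sketch for the VSTS repo configuration; all values are illustrative, and the constructor fixes type to "FactoryVSTSConfiguration":

from azure.mgmt.datafactory.models import FactoryVSTSConfiguration

repo = FactoryVSTSConfiguration(
    account_name="contoso",
    project_name="data-platform",
    repository_name="adf-pipelines",
    collaboration_branch="main",
    root_folder="/",
    tenant_id="00000000-0000-0000-0000-000000000000",  # optional
)
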
10,858 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.GetSsisObjectMetadataRequest
|
class GetSsisObjectMetadataRequest(_serialization.Model):
"""The request payload of get SSIS object metadata.
:ivar metadata_path: Metadata path.
:vartype metadata_path: str
"""
_attribute_map = {
"metadata_path": {"key": "metadataPath", "type": "str"},
}
def __init__(self, *, metadata_path: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword metadata_path: Metadata path.
:paramtype metadata_path: str
"""
super().__init__(**kwargs)
self.metadata_path = metadata_path
|
class GetSsisObjectMetadataRequest(_serialization.Model):
'''The request payload of get SSIS object metadata.
:ivar metadata_path: Metadata path.
:vartype metadata_path: str
'''
def __init__(self, *, metadata_path: Optional[str] = None, **kwargs: Any) -> None:
'''
:keyword metadata_path: Metadata path.
:paramtype metadata_path: str
'''
pass
| 2 | 2 | 7 | 0 | 3 | 4 | 1 | 1.14 | 1 | 3 | 0 | 0 | 1 | 1 | 1 | 16 | 18 | 3 | 7 | 4 | 5 | 8 | 5 | 4 | 3 | 1 | 2 | 0 | 1 |
10,859 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FactoryUpdateParameters
|
class FactoryUpdateParameters(_serialization.Model):
"""Parameters for updating a factory resource.
:ivar tags: The resource tags.
:vartype tags: dict[str, str]
:ivar identity: Managed service identity of the factory.
:vartype identity: ~azure.mgmt.datafactory.models.FactoryIdentity
:ivar public_network_access: Whether or not public network access is allowed for the data
factory. Known values are: "Enabled" and "Disabled".
:vartype public_network_access: str or ~azure.mgmt.datafactory.models.PublicNetworkAccess
"""
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
"identity": {"key": "identity", "type": "FactoryIdentity"},
"public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
identity: Optional["_models.FactoryIdentity"] = None,
public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
**kwargs: Any
) -> None:
"""
:keyword tags: The resource tags.
:paramtype tags: dict[str, str]
:keyword identity: Managed service identity of the factory.
:paramtype identity: ~azure.mgmt.datafactory.models.FactoryIdentity
:keyword public_network_access: Whether or not public network access is allowed for the data
factory. Known values are: "Enabled" and "Disabled".
:paramtype public_network_access: str or ~azure.mgmt.datafactory.models.PublicNetworkAccess
"""
super().__init__(**kwargs)
self.tags = tags
self.identity = identity
self.public_network_access = public_network_access
|
class FactoryUpdateParameters(_serialization.Model):
'''Parameters for updating a factory resource.
:ivar tags: The resource tags.
:vartype tags: dict[str, str]
:ivar identity: Managed service identity of the factory.
:vartype identity: ~azure.mgmt.datafactory.models.FactoryIdentity
:ivar public_network_access: Whether or not public network access is allowed for the data
factory. Known values are: "Enabled" and "Disabled".
:vartype public_network_access: str or ~azure.mgmt.datafactory.models.PublicNetworkAccess
'''
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
identity: Optional["_models.FactoryIdentity"] = None,
public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
**kwargs: Any
) -> None:
'''
:keyword tags: The resource tags.
:paramtype tags: dict[str, str]
:keyword identity: Managed service identity of the factory.
:paramtype identity: ~azure.mgmt.datafactory.models.FactoryIdentity
:keyword public_network_access: Whether or not public network access is allowed for the data
factory. Known values are: "Enabled" and "Disabled".
:paramtype public_network_access: str or ~azure.mgmt.datafactory.models.PublicNetworkAccess
'''
pass
| 2 | 2 | 21 | 0 | 12 | 9 | 1 | 1 | 1 | 3 | 0 | 0 | 1 | 3 | 1 | 16 | 39 | 3 | 18 | 13 | 9 | 18 | 7 | 6 | 5 | 1 | 2 | 0 | 1 |
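
A sketch of an update payload, assuming FactoryIdentity accepts the "SystemAssigned" identity type used elsewhere in this models namespace:

from azure.mgmt.datafactory.models import FactoryIdentity, FactoryUpdateParameters

update = FactoryUpdateParameters(
    tags={"env": "dev"},
    identity=FactoryIdentity(type="SystemAssigned"),
    public_network_access="Disabled",
)
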
10,860 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FactoryRepoConfiguration
|
class FactoryRepoConfiguration(_serialization.Model):
"""Factory's git repo information.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
FactoryGitHubConfiguration, FactoryVSTSConfiguration
All required parameters must be populated in order to send to server.
:ivar type: Type of repo configuration. Required.
:vartype type: str
:ivar account_name: Account name. Required.
:vartype account_name: str
:ivar repository_name: Repository name. Required.
:vartype repository_name: str
:ivar collaboration_branch: Collaboration branch. Required.
:vartype collaboration_branch: str
:ivar root_folder: Root folder. Required.
:vartype root_folder: str
:ivar last_commit_id: Last commit id.
:vartype last_commit_id: str
:ivar disable_publish: Disable manual publish operation in ADF studio to favor automated
publish.
:vartype disable_publish: bool
"""
_validation = {
"type": {"required": True},
"account_name": {"required": True},
"repository_name": {"required": True},
"collaboration_branch": {"required": True},
"root_folder": {"required": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"account_name": {"key": "accountName", "type": "str"},
"repository_name": {"key": "repositoryName", "type": "str"},
"collaboration_branch": {"key": "collaborationBranch", "type": "str"},
"root_folder": {"key": "rootFolder", "type": "str"},
"last_commit_id": {"key": "lastCommitId", "type": "str"},
"disable_publish": {"key": "disablePublish", "type": "bool"},
}
_subtype_map = {
"type": {
"FactoryGitHubConfiguration": "FactoryGitHubConfiguration",
"FactoryVSTSConfiguration": "FactoryVSTSConfiguration",
}
}
def __init__(
self,
*,
account_name: str,
repository_name: str,
collaboration_branch: str,
root_folder: str,
last_commit_id: Optional[str] = None,
disable_publish: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
:keyword account_name: Account name. Required.
:paramtype account_name: str
:keyword repository_name: Repository name. Required.
:paramtype repository_name: str
:keyword collaboration_branch: Collaboration branch. Required.
:paramtype collaboration_branch: str
:keyword root_folder: Root folder. Required.
:paramtype root_folder: str
:keyword last_commit_id: Last commit id.
:paramtype last_commit_id: str
:keyword disable_publish: Disable manual publish operation in ADF studio to favor automated
publish.
:paramtype disable_publish: bool
"""
super().__init__(**kwargs)
self.type: Optional[str] = None
self.account_name = account_name
self.repository_name = repository_name
self.collaboration_branch = collaboration_branch
self.root_folder = root_folder
self.last_commit_id = last_commit_id
self.disable_publish = disable_publish
|
class FactoryRepoConfiguration(_serialization.Model):
'''Factory's git repo information.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
FactoryGitHubConfiguration, FactoryVSTSConfiguration
All required parameters must be populated in order to send to server.
:ivar type: Type of repo configuration. Required.
:vartype type: str
:ivar account_name: Account name. Required.
:vartype account_name: str
:ivar repository_name: Repository name. Required.
:vartype repository_name: str
:ivar collaboration_branch: Collaboration branch. Required.
:vartype collaboration_branch: str
:ivar root_folder: Root folder. Required.
:vartype root_folder: str
:ivar last_commit_id: Last commit id.
:vartype last_commit_id: str
:ivar disable_publish: Disable manual publish operation in ADF studio to favor automated
publish.
:vartype disable_publish: bool
'''
def __init__(
self,
*,
account_name: str,
repository_name: str,
collaboration_branch: str,
root_folder: str,
last_commit_id: Optional[str] = None,
disable_publish: Optional[bool] = None,
**kwargs: Any
) -> None:
'''
:keyword account_name: Account name. Required.
:paramtype account_name: str
:keyword repository_name: Repository name. Required.
:paramtype repository_name: str
:keyword collaboration_branch: Collaboration branch. Required.
:paramtype collaboration_branch: str
:keyword root_folder: Root folder. Required.
:paramtype root_folder: str
:keyword last_commit_id: Last commit id.
:paramtype last_commit_id: str
:keyword disable_publish: Disable manual publish operation in ADF studio to favor automated
publish.
:paramtype disable_publish: bool
'''
pass
| 2 | 2 | 34 | 0 | 19 | 15 | 1 | 0.83 | 1 | 4 | 0 | 2 | 1 | 7 | 1 | 16 | 84 | 7 | 42 | 22 | 30 | 35 | 13 | 12 | 11 | 1 | 2 | 0 | 1 |
10,861 |
Azure/azure-cli-extensions
|
src/containerapp/azext_containerapp/tests/latest/test_containerapp_patch.py
|
azext_containerapp.tests.latest.test_containerapp_patch.ContainerAppPatchTest
|
class ContainerAppPatchTest(ScenarioTest):
def __init__(self, *arg, **kwargs):
super().__init__(*arg, random_config_dir=True, **kwargs)
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_with_resource_group_e2e(self, resource_group):
builder_platform_name = "dotnet"
builder_platform_version = "7.0.9"
builder_runtime_image = f"mcr.microsoft.com/oryx/dotnetcore:{builder_platform_version}-debian-bullseye"
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_bullseye_buildpack_net7"))
ingress = 'external'
target_port = '8080'
# Generate oryx.env file for the app to snap to an older version of .NET 7.0
oryx_env_file_path = os.path.join(source_path, "oryx.env")
with open(oryx_env_file_path, "w+") as f:
f.write(f"ORYX_PLATFORM_NAME={builder_platform_name}\n")
f.write(f"ORYX_PLATFORM_VERSION={builder_platform_version}\n")
f.write(f"ORYX_RUNTIME_IMAGE={builder_runtime_image}\n")
try:
# Generate a name for the Container App
app_name = self.create_random_name(
prefix='containerapp', length=24)
# Create a Container App using a .NET 7.0 image with an outdated run image that is eligible for patching
create_and_verify_containerapp_up(self, resource_group, source_path=source_path, ingress=ingress,
target_port=target_port, app_name=app_name, requires_acr_prerequisite=True)
# Execute and verify patch list command
patchable_images = self.cmd(
f'containerapp patch list -g {resource_group}').get_output_in_json()
self.assertTrue(len(patchable_images) == 1)
self.assertEqual(
patchable_images[0]["oldRunImage"], builder_runtime_image)
# Execute and verify patch apply command
self.cmd(f'containerapp patch apply -g {resource_group}')
patchable_images = self.cmd(
f'containerapp patch list -g {resource_group}').get_output_in_json()
self.assertTrue(len(patchable_images) == 0)
finally:
# Delete the oryx.env file so it does not conflict with other tests
os.remove(oryx_env_file_path)
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_with_environment_e2e(self, resource_group):
builder_platform_name = "dotnet"
builder_platform_version = "7.0.9"
builder_runtime_image = f"mcr.microsoft.com/oryx/dotnetcore:{builder_platform_version}-debian-bullseye"
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_bullseye_buildpack_net7"))
ingress = 'external'
target_port = '8080'
# Generate oryx.env file for the app to snap to an older version of .NET 7.0
oryx_env_file_path = os.path.join(source_path, "oryx.env")
with open(oryx_env_file_path, "w+") as f:
f.write(f"ORYX_PLATFORM_NAME={builder_platform_name}\n")
f.write(f"ORYX_PLATFORM_VERSION={builder_platform_version}\n")
f.write(f"ORYX_RUNTIME_IMAGE={builder_runtime_image}\n")
try:
# Generate a name for the Container App
app_name = self.create_random_name(
prefix='containerapp', length=24)
# Create managed environment
env_name = self.create_random_name(prefix='env', length=24)
self.cmd(
f'containerapp env create -g {resource_group} -n {env_name}')
# Create a Container App using a .NET 7.0 image with an outdated run image that is eligible for patching
create_and_verify_containerapp_up(self, resource_group, env_name=env_name, source_path=source_path,
ingress=ingress, target_port=target_port, app_name=app_name, requires_acr_prerequisite=True)
# Execute and verify patch list command
patchable_images = self.cmd(
f'containerapp patch list -g {resource_group} --environment {env_name}').get_output_in_json()
self.assertTrue(len(patchable_images) == 1)
self.assertEqual(
patchable_images[0]["oldRunImage"], builder_runtime_image)
# Execute and verify patch apply command
self.cmd(
f'containerapp patch apply -g {resource_group} --environment {env_name}')
patchable_images = self.cmd(
f'containerapp patch list -g {resource_group} --environment {env_name}').get_output_in_json()
self.assertTrue(len(patchable_images) == 0)
finally:
# Delete the oryx.env file so it does not conflict with other tests
os.remove(oryx_env_file_path)
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_with_show_all_e2e(self, resource_group):
image = "mcr.microsoft.com/k8se/quickstart:latest"
# Generate a name for the Container App
app_name = self.create_random_name(prefix='containerapp', length=24)
# Create managed environment
env_name = self.create_random_name(prefix='env', length=24)
self.cmd(f'containerapp env create -g {resource_group} -n {env_name}')
create_and_verify_containerapp_up(
self, resource_group, env_name=env_name, image=image, app_name=app_name, requires_acr_prerequisite=True)
# Execute and verify patch list command
patch_cmd = f'containerapp patch list -g {resource_group} --environment {env_name} --show-all'
output = self.cmd(patch_cmd).get_output_in_json()
self.assertEqual(output[0]["targetImageName"],
"mcr.microsoft.com/k8se/quickstart:latest")
# Execute and verify patch apply command
self.cmd(
f'containerapp patch apply -g {resource_group} --environment {env_name} --show-all')
app = self.cmd(
f"containerapp show -g {resource_group} -n {app_name}").get_output_in_json()
image = app["properties"]["template"]["containers"][0]["image"]
self.assertEqual(image, "mcr.microsoft.com/k8se/quickstart:latest")
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_without_arguments_e2e(self, resource_group):
builder_platform_name = "dotnet"
builder_platform_version = "7.0.9"
builder_runtime_image = f"mcr.microsoft.com/oryx/dotnetcore:{builder_platform_version}-debian-bullseye"
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_bullseye_buildpack_net7"))
ingress = 'external'
target_port = '8080'
# Generate oryx.env file for the app to snap to an older version of .NET 7.0
oryx_env_file_path = os.path.join(source_path, "oryx.env")
with open(oryx_env_file_path, "w+") as f:
f.write(f"ORYX_PLATFORM_NAME={builder_platform_name}\n")
f.write(f"ORYX_PLATFORM_VERSION={builder_platform_version}\n")
f.write(f"ORYX_RUNTIME_IMAGE={builder_runtime_image}\n")
try:
# Generate a name for the Container App
app_name = self.create_random_name(
prefix='containerapp', length=24)
# Create a Container App using a .NET 7.0 image with an outdated run image that is eligible for patching
create_and_verify_containerapp_up(self, resource_group, source_path=source_path, ingress=ingress,
target_port=target_port, app_name=app_name, requires_acr_prerequisite=True)
# Execute and verify patch list command
self.cmd(f'configure --defaults group={resource_group}')
patchable_images = self.cmd(
f'containerapp patch list').get_output_in_json()
self.assertTrue(len(patchable_images) == 1)
self.assertEqual(
patchable_images[0]["oldRunImage"], builder_runtime_image)
# Execute and verify patch apply command
self.cmd(f'containerapp patch apply')
patchable_images = self.cmd(
f'containerapp patch list').get_output_in_json()
self.assertTrue(len(patchable_images) == 0)
finally:
# Delete the oryx.env file so it does not conflict with other tests
os.remove(oryx_env_file_path)
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_with_node18_e2e(self, resource_group):
builder_platform_name = "node"
builder_platform_version = "18.16.1"
builder_runtime_image = f"mcr.microsoft.com/oryx/node:{builder_platform_version}-debian-bullseye"
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_bullseye_buildpack_node18"))
ingress = 'external'
target_port = '8080'
# Generate oryx.env file for the app to snap to an older version of Node 18
oryx_env_file_path = os.path.join(source_path, "oryx.env")
with open(oryx_env_file_path, "w+") as f:
f.write(f"ORYX_PLATFORM_NAME={builder_platform_name}\n")
f.write(f"ORYX_PLATFORM_VERSION={builder_platform_version}\n")
f.write(f"ORYX_RUNTIME_IMAGE={builder_runtime_image}\n")
try:
# Generate a name for the Container App
app_name = self.create_random_name(
prefix='containerapp', length=24)
# Create a Container App using a Node 18 image with an outdated run image that is eligible for patching
create_and_verify_containerapp_up(self, resource_group, source_path=source_path, ingress=ingress,
target_port=target_port, app_name=app_name, requires_acr_prerequisite=True)
# Execute and verify patch list command
self.cmd(f'configure --defaults group={resource_group}')
patchable_images = self.cmd(
f'containerapp patch list').get_output_in_json()
self.assertTrue(len(patchable_images) == 1)
self.assertEqual(
patchable_images[0]["oldRunImage"], builder_runtime_image)
# Execute and verify patch apply command
self.cmd(f'containerapp patch apply')
patchable_images = self.cmd(
f'containerapp patch list').get_output_in_json()
self.assertTrue(len(patchable_images) == 0)
finally:
# Delete the oryx.env file so it does not conflict with other tests
os.remove(oryx_env_file_path)
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_with_python310_e2e(self, resource_group):
builder_platform_name = "python"
builder_platform_version = "3.10.4"
builder_runtime_image = f"mcr.microsoft.com/oryx/python:{builder_platform_version}-debian-bullseye"
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_bullseye_buildpack_python310"))
ingress = 'external'
target_port = '80'
# Generate oryx.env file for the app to snap to an older version of Python 3.10
oryx_env_file_path = os.path.join(source_path, "oryx.env")
with open(oryx_env_file_path, "w+") as f:
f.write(f"ORYX_PLATFORM_NAME={builder_platform_name}\n")
f.write(f"ORYX_PLATFORM_VERSION={builder_platform_version}\n")
f.write(f"ORYX_RUNTIME_IMAGE={builder_runtime_image}\n")
try:
# Generate a name for the Container App
app_name = self.create_random_name(
prefix='containerapp', length=24)
# Create a Container App using a Python 3.10 image with an outdated run image that is eligible for patching
create_and_verify_containerapp_up(self, resource_group, source_path=source_path, ingress=ingress,
target_port=target_port, app_name=app_name, requires_acr_prerequisite=True)
# Execute and verify patch list command
self.cmd(f'configure --defaults group={resource_group}')
patchable_images = self.cmd(
'containerapp patch list').get_output_in_json()
self.assertEqual(len(patchable_images), 1)
self.assertEqual(
patchable_images[0]["oldRunImage"], builder_runtime_image)
# Execute and verify patch apply command
self.cmd('containerapp patch apply')
patchable_images = self.cmd(
'containerapp patch list').get_output_in_json()
self.assertEqual(len(patchable_images), 0)
finally:
# Delete the oryx.env file so it does not conflict with other tests
os.remove(oryx_env_file_path)
|
class ContainerAppPatchTest(ScenarioTest):
def __init__(self, *arg, **kwargs):
pass
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_with_resource_group_e2e(self, resource_group):
pass
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_with_environment_e2e(self, resource_group):
pass
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_with_show_all_e2e(self, resource_group):
pass
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_without_arguments_e2e(self, resource_group):
pass
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_with_node18_e2e(self, resource_group):
pass
@live_only()
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_patch_list_and_apply_with_python310_e2e(self, resource_group):
pass
| 26 | 0 | 29 | 5 | 19 | 5 | 1 | 0.23 | 1 | 1 | 0 | 0 | 7 | 0 | 7 | 7 | 233 | 44 | 154 | 71 | 128 | 35 | 131 | 60 | 123 | 1 | 1 | 1 | 7 |
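The three patch tests above repeat the same write-oryx.env / try / finally-remove pattern. A minimal sketch of how it could be factored out, assuming the same ORYX_* keys; pinned_oryx_env is a hypothetical helper, not part of the extension:

import os
from contextlib import contextmanager

@contextmanager
def pinned_oryx_env(source_path, platform_name, platform_version, runtime_image):
    # Pin the builder to an outdated run image so the app is patch-eligible.
    env_path = os.path.join(source_path, "oryx.env")
    with open(env_path, "w") as f:
        f.write(f"ORYX_PLATFORM_NAME={platform_name}\n")
        f.write(f"ORYX_PLATFORM_VERSION={platform_version}\n")
        f.write(f"ORYX_RUNTIME_IMAGE={runtime_image}\n")
    try:
        yield env_path
    finally:
        # Always clean up so the file cannot leak into other tests.
        os.remove(env_path)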
10,862 | Azure/azure-cli-extensions | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_nfsazurefile.py | azext_containerapp.tests.latest.test_containerapp_nfsazurefile.ContainerAppMountNfsAzureFileTest |
class ContainerAppMountNfsAzureFileTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus")
@SubnetPreparer(location="centralus", delegations='Microsoft.App/environments', service_endpoints="Microsoft.Storage.Global")
def test_container_app_mount_nfsazurefile_e2e(self, resource_group, subnet_id, vnet_name, subnet_name):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env = self.create_random_name(prefix='env', length=24)
app = self.create_random_name(prefix='app1', length=24)
storage = self.create_random_name(prefix='storage', length=24)
share = self.create_random_name(prefix='share', length=10)
storage_account_location = TEST_LOCATION
if format_location(storage_account_location) == format_location(STAGE_LOCATION):
storage_account_location = "eastus"
self.cmd(
f'az storage account create --resource-group {resource_group} --name {storage} --location {storage_account_location} --kind FileStorage --sku Premium_LRS --enable-large-file-share --https-only false --output none')
self.cmd(
f'az storage share-rm create --resource-group {resource_group} --storage-account {storage} --name {share} --quota 1024 --enabled-protocols NFS --root-squash NoRootSquash --output none')
self.cmd(
f'az storage account network-rule add --resource-group {resource_group} --account-name {storage} --vnet-name {vnet_name} --subnet {subnet_name}'
)
print(subnet_id, file=sys.stdout)
create_containerapp_env(
self, env, resource_group, TEST_LOCATION, subnet_id)
containerapp_env = self.cmd('containerapp env show -n {} -g {}'.format(env, resource_group), checks=[
JMESPathCheck('name', env)
]).get_output_in_json()
self.cmd(f'az containerapp env storage set -g {resource_group} -n {env} --storage-type NfsAzureFile --storage-name {share} -s {storage}.file.core.windows.net -f /{storage}/{share} --access-mode ReadWrite', checks=[
JMESPathCheck('name', share),
JMESPathCheck('properties.nfsAzureFile.server',
f'{storage}.file.core.windows.net'),
JMESPathCheck('properties.nfsAzureFile.shareName',
f'/{storage}/{share}'),
JMESPathCheck('properties.nfsAzureFile.accessMode', 'ReadWrite'),
])
self.cmd('containerapp env storage show -n {} -g {} --storage-name {}'.format(env, resource_group, share), checks=[
JMESPathCheck('name', share),
JMESPathCheck('properties.nfsAzureFile.server',
f'{storage}.file.core.windows.net'),
JMESPathCheck('properties.nfsAzureFile.shareName',
f'/{storage}/{share}'),
JMESPathCheck('properties.nfsAzureFile.accessMode', 'ReadWrite'),
])
self.cmd('containerapp env storage list -n {} -g {}'.format(env, resource_group), checks=[
JMESPathCheck('[0].name', share),
JMESPathCheck('[0].properties.nfsAzureFile.server',
f'{storage}.file.core.windows.net'),
JMESPathCheck('[0].properties.nfsAzureFile.shareName',
f'/{storage}/{share}'),
JMESPathCheck(
'[0].properties.nfsAzureFile.accessMode', 'ReadWrite'),
])
containerapp_yaml_text = f"""
location: {TEST_LOCATION}
type: Microsoft.App/containerApps
name: {app}
resourceGroup: {resource_group}
properties:
managedEnvironmentId: {containerapp_env["id"]}
configuration:
activeRevisionsMode: Single
ingress:
external: true
allowInsecure: true
targetPort: 80
traffic:
- latestRevision: true
weight: 100
transport: Auto
template:
containers:
- image: mcr.microsoft.com/k8se/quickstart:latest
name: acamounttest
resources:
cpu: 0.5
ephemeralStorage: 1Gi
memory: 1Gi
volumeMounts:
- mountPath: /mnt/data
volumeName: nfs-azure-files-volume
subPath: sub
volumes:
- name: nfs-azure-files-volume
storageType: NfsAzureFile
storageName: {share}
mountOptions: hard
"""
containerapp_file_name = f"{self._testMethodName}_containerapp.yml"
write_test_file(containerapp_file_name, containerapp_yaml_text)
self.cmd(
f'az containerapp create -g {resource_group} --environment {env} -n {app} --yaml {containerapp_file_name}')
self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[
JMESPathCheck(
'properties.template.volumes[0].storageType', 'NfsAzureFile'),
JMESPathCheck('properties.template.volumes[0].storageName', share),
JMESPathCheck(
'properties.template.volumes[0].name', 'nfs-azure-files-volume'),
JMESPathCheck(
'properties.template.volumes[0].mountOptions', 'hard'),
JMESPathCheck(
'properties.template.containers[0].volumeMounts[0].subPath', 'sub'),
JMESPathCheck(
'properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'),
JMESPathCheck(
'properties.template.containers[0].volumeMounts[0].volumeName', 'nfs-azure-files-volume'),
])
self.cmd('az containerapp revision list -g {} -n {}'.format(resource_group, app), checks=[
JMESPathCheck(
'[0].properties.template.volumes[0].storageType', 'NfsAzureFile'),
JMESPathCheck(
'[0].properties.template.volumes[0].storageName', share),
JMESPathCheck('[0].properties.template.volumes[0].name',
'nfs-azure-files-volume'),
JMESPathCheck(
'[0].properties.template.volumes[0].mountOptions', 'hard'),
JMESPathCheck(
'[0].properties.template.containers[0].volumeMounts[0].subPath', 'sub'),
JMESPathCheck(
'[0].properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'),
JMESPathCheck(
'[0].properties.template.containers[0].volumeMounts[0].volumeName', 'nfs-azure-files-volume'),
])
clean_up_test_file(containerapp_file_name)
containerapp_yaml_text = f"""
location: {TEST_LOCATION}
type: Microsoft.App/containerApps
name: {app}
resourceGroup: {resource_group}
properties:
managedEnvironmentId: {containerapp_env["id"]}
configuration:
activeRevisionsMode: Single
ingress:
external: true
allowInsecure: true
targetPort: 80
traffic:
- latestRevision: true
weight: 100
transport: Auto
template:
containers:
- image: mcr.microsoft.com/k8se/quickstart:latest
name: acamounttest
resources:
cpu: 0.5
ephemeralStorage: 1Gi
memory: 1Gi
volumeMounts:
- mountPath: /mnt/data
volumeName: nfs-azure-files-volume
subPath: sub2
volumes:
- name: nfs-azure-files-volume
storageType: NfsAzureFile
storageName: {share}
mountOptions: hard
"""
containerapp_file_name = f"{self._testMethodName}_containerapp_1.yml"
write_test_file(containerapp_file_name, containerapp_yaml_text)
self.cmd(
f'az containerapp update -g {resource_group} -n {app} --yaml {containerapp_file_name}')
self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[
JMESPathCheck(
'properties.template.volumes[0].storageType', 'NfsAzureFile'),
JMESPathCheck('properties.template.volumes[0].storageName', share),
JMESPathCheck(
'properties.template.volumes[0].name', 'nfs-azure-files-volume'),
JMESPathCheck(
'properties.template.volumes[0].mountOptions', 'hard'),
JMESPathCheck(
'properties.template.containers[0].volumeMounts[0].subPath', 'sub2'),
JMESPathCheck(
'properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'),
JMESPathCheck(
'properties.template.containers[0].volumeMounts[0].volumeName', 'nfs-azure-files-volume'),
])
self.cmd('az containerapp revision list -g {} -n {}'.format(resource_group, app), checks=[
JMESPathCheck(
'[1].properties.template.volumes[0].storageType', 'NfsAzureFile'),
JMESPathCheck(
'[1].properties.template.volumes[0].storageName', share),
JMESPathCheck('[1].properties.template.volumes[0].name',
'nfs-azure-files-volume'),
JMESPathCheck(
'[1].properties.template.volumes[0].mountOptions', 'hard'),
JMESPathCheck(
'[1].properties.template.containers[0].volumeMounts[0].subPath', 'sub2'),
JMESPathCheck(
'[1].properties.template.containers[0].volumeMounts[0].mountPath', '/mnt/data'),
JMESPathCheck(
'[1].properties.template.containers[0].volumeMounts[0].volumeName', 'nfs-azure-files-volume'),
])
clean_up_test_file(containerapp_file_name)
|
class ContainerAppMountNfsAzureFileTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus")
@SubnetPreparer(location="centralus", delegations='Microsoft.App/environments', service_endpoints="Microsoft.Storage.Global")
def test_container_app_mount_nfsazurefile_e2e(self, resource_group, subnet_id, vnet_name, subnet_name):
pass
| 5 | 0 | 171 | 16 | 155 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 175 | 16 | 159 | 11 | 154 | 0 | 33 | 10 | 31 | 2 | 1 | 1 | 2 |
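The NFS test above always derives the mount target from the storage account name; a tiny sketch of that convention (illustrative only, matching the -s and -f values asserted in the checks):

def nfs_azure_file_target(storage_account, share):
    # Server is the account's Azure Files endpoint; the exported path is
    # /<account>/<share>, as asserted via properties.nfsAzureFile above.
    server = f"{storage_account}.file.core.windows.net"
    share_path = f"/{storage_account}/{share}"
    return server, share_path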
10,863 | Azure/azure-cli-extensions | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_mount_secret_volume.py | azext_containerapp.tests.latest.test_containerapp_mount_secret_volume.ContainerAppMountSecretTest |
class ContainerAppMountSecretTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northcentralus")
def test_container_app_mount_secret_e2e(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env = self.create_random_name(prefix='env', length=24)
app = self.create_random_name(prefix='app1', length=24)
create_containerapp_env(self, env, resource_group)
self.cmd('containerapp env show -n {} -g {}'.format(env, resource_group), checks=[
JMESPathCheck('name', env)
])
secretRef1 = "mysecret"
secretValue1 = "secretvalue1"
secretRef2 = "anothersecret"
secretValue2 = "secretvalue2"
self.cmd(
f'az containerapp create -g {resource_group} --environment {env} -n {app} --secrets {secretRef1}={secretValue1} {secretRef2}={secretValue2} --secret-volume-mount "mnt/secrets"')
self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[
JMESPathCheck(
'properties.template.volumes[0].storageType', 'Secret'),
# --secret-volume-mount mounts all secrets, not specific secrets, therefore no secrets should be returned.
JMESPathCheck('properties.template.volumes[0].secrets', None),
JMESPathCheck(
'properties.template.containers[0].volumeMounts[0].mountPath', 'mnt/secrets'),
])
# use update to change the secret volume mount path
self.cmd(
f'az containerapp update -n {app} -g {resource_group} --secret-volume-mount "mnt/newpath"')
self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[
JMESPathCheck(
'properties.template.containers[0].volumeMounts[0].mountPath', 'mnt/newpath'),
])
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northcentralus")
def test_container_app_mount_secret_update_e2e(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
# test creating a container app without a secret volume mount, then using update to add one
app = self.create_random_name(prefix='app2', length=24)
env = self.create_random_name(prefix='env', length=24)
create_containerapp_env(self, env, resource_group)
secretRef1 = "mysecret"
secretValue1 = "secretvalue1"
secretRef2 = "anothersecret"
secretValue2 = "secretvalue2"
self.cmd('containerapp env show -n {} -g {}'.format(env, resource_group), checks=[
JMESPathCheck('name', env)
])
self.cmd(
f'az containerapp create -g {resource_group} --environment {env} -n {app} --secrets {secretRef1}={secretValue1} {secretRef2}={secretValue2}')
self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[
JMESPathCheck('properties.template.volumes', None),
])
self.cmd(
f'az containerapp update -n {app} -g {resource_group} --secret-volume-mount "mnt/secrets"')
self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[
JMESPathCheck(
'properties.template.volumes[0].storageType', 'Secret'),
JMESPathCheck('properties.template.volumes[0].secrets', None),
JMESPathCheck(
'properties.template.containers[0].volumeMounts[0].mountPath', 'mnt/secrets'),
])
|
class ContainerAppMountSecretTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northcentralus")
def test_container_app_mount_secret_e2e(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northcentralus")
def test_container_app_mount_secret_update_e2e(self, resource_group):
pass
| 7 | 0 | 30 | 6 | 22 | 2 | 1 | 0.06 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 65 | 13 | 49 | 17 | 42 | 3 | 29 | 15 | 26 | 1 | 1 | 0 | 2 |
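Context for the assertions above: --secret-volume-mount mounts all of the app's secrets, which is why properties.template.volumes[0].secrets stays None. Assuming the usual secret-volume layout of one file per secret ref (hedged; not verified by this test), a container could read them like so:

import os

def read_mounted_secrets(mount_path="/mnt/secrets"):
    secrets = {}
    for name in os.listdir(mount_path):  # file name = secret ref, e.g. "mysecret"
        with open(os.path.join(mount_path, name)) as f:
            secrets[name] = f.read()
    return secrets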
10,864 | Azure/azure-cli-extensions | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_maintenance_config.py | azext_containerapp.tests.latest.test_containerapp_maintenance_config.ContainerAppMaintenanceConfigTest |
class ContainerAppMaintenanceConfigTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus")
def test_containerapp_maintenanceconfig_crudoperations_e2e(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='aca-maintenance-config-env', length=30)
self.cmd('containerapp env create -g {} -n {} --location {} --logs-destination none --enable-workload-profiles'.format(
resource_group, env_name, TEST_LOCATION))
duration = 10
weekday = "Sunday"
startHour = 12
# verify the container app environment backing the Maintenance Config resource
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name)
])
# test for CRUD operations on Maintenance Config
self.cmd("az containerapp env maintenance-config add --resource-group {} --environment {} -d {} -w {} -s {}".format(
resource_group, env_name, duration, weekday, startHour))
# verify the resource
self.cmd("az containerapp env maintenance-config list --resource-group {} --environment {}".format(resource_group, env_name), checks=[
JMESPathCheck(
'properties.scheduledEntries[0].durationHours', duration),
JMESPathCheck(
'properties.scheduledEntries[0].startHourUtc', startHour),
JMESPathCheck('properties.scheduledEntries[0].weekDay', weekday),
])
updatedDuration = 11
updatedWeekday = "Tuesday"
# update the MaintenanceConfig, check duration and weekday are updated and start hour remains the same
self.cmd("az containerapp env maintenance-config update --resource-group {} --environment {} -d {} -w {}".format(resource_group, env_name, updatedDuration, updatedWeekday), checks=[
JMESPathCheck(
'properties.scheduledEntries[0].durationHours', updatedDuration),
JMESPathCheck(
'properties.scheduledEntries[0].startHourUtc', startHour),
JMESPathCheck(
'properties.scheduledEntries[0].weekDay', updatedWeekday),
])
# update nothing to confirm all properties remain
self.cmd("az containerapp env maintenance-config update --resource-group {} --environment {}".format(resource_group, env_name), checks=[
JMESPathCheck(
'properties.scheduledEntries[0].durationHours', updatedDuration),
JMESPathCheck(
'properties.scheduledEntries[0].startHourUtc', startHour),
JMESPathCheck(
'properties.scheduledEntries[0].weekDay', updatedWeekday),
])
# delete the Container App Maintenance Config resource
self.cmd("az containerapp env maintenance-config remove --resource-group {} --environment {} -y".format(resource_group, env_name))
self.cmd("az containerapp env maintenance-config list --resource-group {} --environment {}".format(resource_group, env_name), checks=[
JMESPathCheck('length(@)', 0),
])
self.cmd(
'containerapp env delete -g {} -n {} -y'.format(resource_group, env_name))
|
class ContainerAppMaintenanceConfigTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus")
def test_containerapp_maintenanceconfig_crudoperations_e2e(self, resource_group):
pass
| 4 | 0 | 52 | 13 | 33 | 6 | 1 | 0.17 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 55 | 13 | 36 | 9 | 32 | 6 | 18 | 8 | 16 | 1 | 1 | 0 | 1 |
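A minimal model of the scheduledEntries shape the maintenance-config test asserts on (weekDay, startHourUtc, durationHours); the dataclass and its bounds are assumptions, not the extension's types:

from dataclasses import dataclass

@dataclass
class ScheduledEntry:
    week_day: str        # e.g. "Sunday", later updated to "Tuesday"
    start_hour_utc: int  # e.g. 12
    duration_hours: int  # e.g. 10, later updated to 11

    def validate(self):
        if not 0 <= self.start_hour_utc <= 23:
            raise ValueError("startHourUtc must be between 0 and 23")
        if self.duration_hours <= 0:
            raise ValueError("durationHours must be positive")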
10,865 | Azure/azure-cli-extensions | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_java_logger.py | azext_containerapp.tests.latest.test_containerapp_java_logger.ContainerappJavaLoggerTests |
class ContainerappJavaLoggerTests(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer()
def test_containerapp_java_loggers(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
app = self.create_random_name(prefix='aca', length=24)
image = "mcr.microsoft.com/azurespringapps/samples/hello-world:0.0.1"
env = prepare_containerapp_env_for_app_e2e_tests(self)
# Create container app
self.cmd(
f'containerapp create -g {resource_group} -n {app} --image {image} --environment {env}')
self.cmd(f'containerapp show -g {resource_group} -n {app}', checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck("properties.configuration.runtime", None)
])
self.cmd(f'containerapp java logger set --logger-name root --logger-level debug -g {resource_group} -n {app}',
expect_failure=True)
self.cmd(f'containerapp java logger delete --logger-name testpkg -g {resource_group} -n {app}',
expect_failure=True)
self.cmd(f'containerapp java logger show --logger-name "org.springframework.boot" -g {resource_group} -n {app}',
expect_failure=True)
# Enable java agent
self.cmd(f'containerapp update -g {resource_group} -n {app} --enable-java-agent', checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
"properties.configuration.runtime.java.javaAgent.enabled", True),
JMESPathCheckNotExists(
"properties.configuration.runtime.java.javaAgent.logging")
])
# Add logger
self.cmd(f'containerapp java logger set --logger-name root --logger-level debug -g {resource_group} -n {app}', checks=[
JMESPathCheck("length([*])", 1),
JMESPathCheck("[0].level", "debug"),
JMESPathCheck("[0].logger", "root")
])
# Add logger
self.cmd(f'containerapp java logger set --logger-name testpkg --logger-level info -g {resource_group} -n {app}', checks=[
JMESPathCheck("length([*])", 2),
JMESPathCheck("[0].level", "debug"),
JMESPathCheck("[0].logger", "root"),
JMESPathCheck("[1].level", "info"),
JMESPathCheck("[1].logger", "testpkg")
])
# Update logger
self.cmd(f'containerapp java logger set --logger-name testpkg --logger-level debug -g {resource_group} -n {app}', checks=[
JMESPathCheck("length([*])", 2),
JMESPathCheck("[0].level", "debug"),
JMESPathCheck("[0].logger", "root"),
JMESPathCheck("[1].level", "debug"),
JMESPathCheck("[1].logger", "testpkg")
])
# Delete a logger that does not exist
self.cmd(
f'containerapp java logger delete --logger-name notexistlogger -g {resource_group} -n {app}', expect_failure=True)
# Delete logger
self.cmd(f'containerapp java logger delete --logger-name testpkg -g {resource_group} -n {app}', checks=[
JMESPathCheck("length([*])", 1),
JMESPathCheck("[0].level", "debug"),
JMESPathCheck("[0].logger", "root"),
])
# Add logger
self.cmd(f'containerapp java logger set --logger-name "org.springframework.boot" --logger-level debug -g {resource_group} -n {app}', checks=[
JMESPathCheck("length([*])", 2),
JMESPathCheck("[0].level", "debug"),
JMESPathCheck("[0].logger", "root"),
JMESPathCheck("[1].level", "debug"),
JMESPathCheck("[1].logger", "org.springframework.boot")
])
# Display logger
self.cmd(f'containerapp java logger show --logger-name "org.springframework.boot" -g {resource_group} -n {app}', checks=[
JMESPathCheck('logger', "org.springframework.boot"),
JMESPathCheck('level', "debug")
])
# Show a logger that does not exist
self.cmd(
f'containerapp java logger show --logger-name "notexistlogger" -g {resource_group} -n {app}', expect_failure=True)
# Display all loggers
self.cmd(f'containerapp java logger show --all -g {resource_group} -n {app}', checks=[
JMESPathCheck("length([*])", 2),
JMESPathCheck('[0].logger', "root"),
JMESPathCheck('[0].level', "debug"),
JMESPathCheck('[1].logger', "org.springframework.boot"),
JMESPathCheck('[1].level', "debug")
])
# Update the container app with --runtime=java and --enable-java-metrics set
self.cmd(f'containerapp update -g {resource_group} -n {app} --runtime=java --enable-java-metrics', checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
"properties.configuration.runtime.java.enableMetrics", True),
JMESPathCheck(
"properties.configuration.runtime.java.javaAgent.enabled", True),
JMESPathCheck(
"length(properties.configuration.runtime.java.javaAgent.logging.loggerSettings[*])", 2),
JMESPathCheck(
'properties.configuration.runtime.java.javaAgent.logging.loggerSettings[0].logger', "root"),
JMESPathCheck(
'properties.configuration.runtime.java.javaAgent.logging.loggerSettings[0].level', "debug"),
JMESPathCheck(
'properties.configuration.runtime.java.javaAgent.logging.loggerSettings[1].logger', "org.springframework.boot"),
JMESPathCheck(
'properties.configuration.runtime.java.javaAgent.logging.loggerSettings[1].level', "debug")
])
# Update the container app with --enable-java-agent set
self.cmd(f'containerapp update -g {resource_group} -n {app} --enable-java-agent', checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
"properties.configuration.runtime.java.enableMetrics", True),
JMESPathCheck(
"properties.configuration.runtime.java.javaAgent.enabled", True),
JMESPathCheck(
"length(properties.configuration.runtime.java.javaAgent.logging.loggerSettings[*])", 2),
JMESPathCheck(
'properties.configuration.runtime.java.javaAgent.logging.loggerSettings[0].logger', "root"),
JMESPathCheck(
'properties.configuration.runtime.java.javaAgent.logging.loggerSettings[0].level', "debug"),
JMESPathCheck('properties.configuration.runtime.java.javaAgent.logging.loggerSettings[1].logger',
"org.springframework.boot"),
JMESPathCheck(
'properties.configuration.runtime.java.javaAgent.logging.loggerSettings[1].level', "debug")
])
# Delete all loggers
self.cmd(f'containerapp java logger delete --all -g {resource_group} -n {app}', checks=[
JMESPathCheck("length([*])", 0)
])
# Delete container app
self.cmd(f'containerapp delete -g {resource_group} -n {app} --yes')
|
class ContainerappJavaLoggerTests(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer()
def test_containerapp_java_loggers(self, resource_group):
pass
| 4 | 0 | 123 | 18 | 90 | 15 | 1 | 0.16 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 126 | 18 | 93 | 6 | 89 | 15 | 25 | 5 | 23 | 1 | 1 | 0 | 1 |
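The logger test reads javaAgent.logging.loggerSettings back as a list of {logger, level} entries, and `java logger set` upserts by logger name (updating "testpkg" changes its level rather than appending a duplicate). A hedged in-memory sketch of that behavior:

def set_logger(settings, name, level):
    # Upsert: replace the level of an existing logger, else append it.
    for entry in settings:
        if entry["logger"] == name:
            entry["level"] = level
            return settings
    settings.append({"logger": name, "level": level})
    return settings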
10,866 | Azure/azure-cli-extensions | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_java_component.py | azext_containerapp.tests.latest.test_containerapp_java_component.ContainerappJavaComponentTests |
class ContainerappJavaComponentTests(ScenarioTest):
@ResourceGroupPreparer(location='eastus')
def test_containerapp_java_component(self, resource_group):
# type "linkers" is not available in North Central US (Stage), if the TEST_LOCATION is "northcentralusstage", use francecentral as location
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "francecentral"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(prefix='aca-java-env', length=24)
ca_name = self.create_random_name(prefix='javaapp1', length=24)
config_name = "myconfig"
eureka_name = "myeureka"
sba_name = "mysba"
create_containerapp_env(self, env_name, resource_group)
env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
default_domain = env['properties']['defaultDomain']
# List Java Components
java_component_list = self.cmd(
"containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertEqual(len(java_component_list), 0)
# Create Config & Eureka
self.cmd('containerapp env java-component config-server-for-spring create -g {} -n {} --environment {}'.format(resource_group, config_name, env_name), checks=[
JMESPathCheck('name', config_name),
JMESPathCheck('properties.componentType', "SpringCloudConfig"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
self.cmd(
'containerapp env java-component eureka-server-for-spring create -g {} -n {} --environment {} --configuration eureka.server.renewal-percent-threshold=0.85 eureka.server.enable-self-preservation=false'.format(
resource_group, eureka_name, env_name), checks=[
JMESPathCheck('name', eureka_name),
JMESPathCheck('properties.componentType', "SpringCloudEureka"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('properties.ingress.fqdn',
eureka_name + "-azure-java.ext." + default_domain),
JMESPathCheck('length(properties.configurations)', 2),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
# List Java Components
java_component_list = self.cmd(
"containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertEqual(len(java_component_list), 2)
# Create SBA; it is bound to eureka later via update
self.cmd('containerapp env java-component admin-for-spring create -g {} -n {} --environment {} --min-replicas 2 --max-replicas 2 --configuration'.format(resource_group, sba_name, env_name), checks=[
JMESPathCheck('name', sba_name),
JMESPathCheck('properties.componentType', "SpringBootAdmin"),
JMESPathCheck('properties.ingress.fqdn', sba_name +
"-azure-java.ext." + default_domain),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('properties.scale.minReplicas', 2),
JMESPathCheck('properties.scale.maxReplicas', 2)
])
# List Java Components
java_component_list = self.cmd(
"containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertEqual(len(java_component_list), 3)
# Update Java Components
self.cmd(
'containerapp env java-component config-server-for-spring update -g {} -n {} --environment {} --configuration spring.cloud.config.server.git.uri=https://github.com/Azure-Samples/piggymetrics-config.git'.format(
resource_group, config_name, env_name), checks=[
JMESPathCheck('name', config_name),
JMESPathCheck('properties.componentType', "SpringCloudConfig"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.configurations)', 1),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
self.cmd('containerapp env java-component eureka-server-for-spring update -g {} -n {} --environment {} --configuration'.format(resource_group, eureka_name, env_name), checks=[
JMESPathCheck('name', eureka_name),
JMESPathCheck('properties.componentType', "SpringCloudEureka"),
JMESPathCheck('properties.ingress.fqdn', eureka_name +
"-azure-java.ext." + default_domain),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
self.cmd('containerapp env java-component admin-for-spring update -g {} -n {} --environment {} --bind {}:myeureka --min-replicas 1 --max-replicas 1 --configuration'.format(resource_group, sba_name, env_name, eureka_name), checks=[
JMESPathCheck('name', sba_name),
JMESPathCheck('properties.componentType', "SpringBootAdmin"),
JMESPathCheck('properties.serviceBinds[0].name', eureka_name),
JMESPathCheck('properties.ingress.fqdn', sba_name +
"-azure-java.ext." + default_domain),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
# Show Java Components
self.cmd('containerapp env java-component config-server-for-spring show -g {} -n {} --environment {}'.format(resource_group, config_name, env_name), checks=[
JMESPathCheck('name', config_name),
JMESPathCheck('properties.componentType', "SpringCloudConfig"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.configurations)', 1),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
self.cmd('containerapp env java-component eureka-server-for-spring show -g {} -n {} --environment {}'.format(resource_group, eureka_name, env_name), checks=[
JMESPathCheck('name', eureka_name),
JMESPathCheck('properties.componentType', "SpringCloudEureka"),
JMESPathCheck('properties.ingress.fqdn', eureka_name +
"-azure-java.ext." + default_domain),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
self.cmd('containerapp env java-component admin-for-spring show -g {} -n {} --environment {}'.format(resource_group, sba_name, env_name), checks=[
JMESPathCheck('name', sba_name),
JMESPathCheck('properties.componentType', "SpringBootAdmin"),
JMESPathCheck('properties.ingress.fqdn', sba_name +
"-azure-java.ext." + default_domain),
JMESPathCheck('properties.serviceBinds[0].name', eureka_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
# Create App with wrong binding name
self.cmd('containerapp create -n {} -g {} --environment {} --bind {}:my-config'.format(
ca_name, resource_group, env_name, config_name), expect_failure=True)
# Create App with bind
self.cmd('containerapp create -n {} -g {} --environment {} --bind {} {}'.format(ca_name, resource_group, env_name, config_name, eureka_name), expect_failure=False, checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.template.serviceBinds)', 2),
JMESPathCheck(
'properties.template.serviceBinds[0].name', config_name),
JMESPathCheck(
'properties.template.serviceBinds[1].name', eureka_name)
])
# Update App with unbind
self.cmd('containerapp update -n {} -g {} --unbind {}'.format(ca_name, resource_group, config_name), expect_failure=False, checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.template.serviceBinds)', 1),
JMESPathCheck(
'properties.template.serviceBinds[0].name', eureka_name)
])
# Update App with bind
self.cmd('containerapp update -n {} -g {} --bind {}'.format(ca_name, resource_group, config_name), expect_failure=False, checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.template.serviceBinds)', 2)
])
# (JavaComponentInUse) Cannot remove Java Component: myconfig because it is being binded by container apps, please unbind the Java Component first.
with self.assertRaisesRegex(CLIInternalError,
"please unbind the Java Component first"):
self.cmd('containerapp env java-component config-server-for-spring delete -g {} -n {} --environment {} --yes'.format(
resource_group, config_name, env_name))
# Unbind all java component from container apps
self.cmd('containerapp update -n {} -g {} --unbind {} {}'.format(ca_name, resource_group, config_name, eureka_name), expect_failure=False, checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('properties.template.serviceBinds', None),
])
# Delete Java Components
self.cmd('containerapp env java-component config-server-for-spring delete -g {} -n {} --environment {} --yes'.format(
resource_group, config_name, env_name), expect_failure=False)
self.cmd('containerapp env java-component eureka-server-for-spring delete -g {} -n {} --environment {} --yes'.format(
resource_group, eureka_name, env_name), expect_failure=False)
self.cmd('containerapp env java-component admin-for-spring delete -g {} -n {} --environment {} --yes'.format(
resource_group, sba_name, env_name), expect_failure=False)
# List Java Components
java_component_list = self.cmd(
"containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertTrue(len(java_component_list) == 0)
@ResourceGroupPreparer(location='eastus')
def test_containerapp_gateway_for_spring_component(self, resource_group):
# type "linkers" is not available in North Central US (Stage), if the TEST_LOCATION is "northcentralusstage", use francecentral as location
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "francecentral"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(prefix='aca-java-env', length=24)
ca_name = self.create_random_name(prefix='javaapp1', length=24)
gateway_name = "mygateway"
create_containerapp_env(self, env_name, resource_group)
env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
# List Java Components
java_component_list = self.cmd(
"containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertEqual(len(java_component_list), 0)
route_yaml_text_create = f"""
springCloudGatewayRoutes:
- id: "route1"
uri: "https://otherjavacomponent.myenvironment.test.net"
predicates:
- "Path=/v1/path1"
- "After=2024-01-01T00:00:00.000-00:00[America/Denver]"
filters:
- "SetPath=/filter1"
- id: "route2"
uri: "https://otherjavacomponent.myenvironment.test.net"
predicates:
- "Path=/v2/path2"
- "After=2024-01-01T00:00:00.000-00:00[America/Denver]"
filters:
- "SetPath=/filter2"
"""
route_yaml_name_create = f"{self._testMethodName}_route_create.yml"
write_test_file(route_yaml_name_create, route_yaml_text_create)
# Create Gateway for spring with route files
self.cmd("containerapp env java-component gateway-for-spring create -g {} -n {} --environment {} --route-yaml {}".format(resource_group, gateway_name, env_name, route_yaml_name_create), checks=[
JMESPathCheck('name', gateway_name),
JMESPathCheck('properties.componentType', "SpringCloudGateway"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.springCloudGatewayRoutes)', 2),
JMESPathCheck(
'properties.springCloudGatewayRoutes[0].id', "route1"),
JMESPathCheck(
'properties.springCloudGatewayRoutes[0].uri', "https://otherjavacomponent.myenvironment.test.net"),
JMESPathCheck(
'length(properties.springCloudGatewayRoutes[0].predicates)', 2),
JMESPathCheck(
'properties.springCloudGatewayRoutes[0].predicates[0]', "Path=/v1/path1"),
JMESPathCheck(
'properties.springCloudGatewayRoutes[0].predicates[1]', "After=2024-01-01T00:00:00.000-00:00[America/Denver]"),
JMESPathCheck(
'length(properties.springCloudGatewayRoutes[0].filters)', 1),
JMESPathCheck(
'properties.springCloudGatewayRoutes[0].filters[0]', "SetPath=/filter1"),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
# Update Gateway for spring without route files
self.cmd("containerapp env java-component gateway-for-spring update -g {} -n {} --environment {} --configuration spring.cloud.gateway.fail-on-route-definition-error=true".format(resource_group, gateway_name, env_name, route_yaml_name_create), checks=[
JMESPathCheck('name', gateway_name),
JMESPathCheck('properties.componentType', "SpringCloudGateway"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.springCloudGatewayRoutes)', 2),
JMESPathCheck('length(properties.configurations)', 1),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
route_yaml_text_update = f"""
springCloudGatewayRoutes:
"""
route_yaml_name_update = f"{self._testMethodName}_route_update.yml"
# Update Gateway for spring with empty route files
write_test_file(route_yaml_name_update, route_yaml_text_update)
self.cmd("containerapp env java-component gateway-for-spring update -g {} -n {} --environment {} --route-yaml {}".format(resource_group, gateway_name, env_name, route_yaml_name_update), checks=[
JMESPathCheck('name', gateway_name),
JMESPathCheck('properties.componentType', "SpringCloudGateway"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.springCloudGatewayRoutes)', 0),
JMESPathCheck('length(properties.configurations)', 1),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
self.cmd("containerapp env java-component gateway-for-spring show -g {} -n {} --environment {}".format(resource_group, gateway_name, env_name), checks=[
JMESPathCheck('name', gateway_name),
JMESPathCheck('properties.componentType', "SpringCloudGateway"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.springCloudGatewayRoutes)', 0),
JMESPathCheck('length(properties.configurations)', 1),
JMESPathCheck('properties.scale.minReplicas', 1),
JMESPathCheck('properties.scale.maxReplicas', 1)
])
# Delete Gateway for spring
self.cmd("containerapp env java-component gateway-for-spring delete -g {} -n {} --environment {} --yes".format(
resource_group, gateway_name, env_name), expect_failure=False)
# List Java Components
java_component_list = self.cmd(
"containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertEqual(len(java_component_list), 0)
# Clean up route files
clean_up_test_file(route_yaml_name_create)
clean_up_test_file(route_yaml_name_update)
@ResourceGroupPreparer(location='eastus')
def test_containerapp_java_component_configurations(self, resource_group):
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "francecentral"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(prefix='aca-java-env', length=24)
ca_name = self.create_random_name(prefix='javaapp1', length=24)
eureka_name = "myeureka"
create_containerapp_env(self, env_name, resource_group)
env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
# List Java Components
java_component_list = self.cmd(
"containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertEqual(len(java_component_list), 0)
with self.assertRaisesRegex(ValidationError,
"Please use the later form for better flexibility and clarity."):
self.cmd('containerapp env java-component eureka-server-for-spring create -g {} -n {} --environment {} --configuration eureka.server.renewal-percent-threshold=0.85 --set-configurations eureka.server.enable-self-preservation=false'.format(resource_group, eureka_name, env_name))
self.cmd(
'containerapp env java-component eureka-server-for-spring create -g {} -n {} --environment {} --set-configurations eureka.server.renewal-percent-threshold=0.85 eureka.server.enable-self-preservation=false'.format(
resource_group, eureka_name, env_name), checks=[
JMESPathCheck('name', eureka_name),
JMESPathCheck('properties.componentType', "SpringCloudEureka"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.configurations)', 2),
])
self.cmd(
'containerapp env java-component eureka-server-for-spring update -g {} -n {} --environment {} --set-configurations eureka.server.renewal-percent-threshold=0.95 eureka.server.renewal-threshold-update-interval-ms=1000'.format(
resource_group, eureka_name, env_name), checks=[
JMESPathCheck('name', eureka_name),
JMESPathCheck('properties.componentType', "SpringCloudEureka"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.configurations)', 3)
])
self.cmd(
'containerapp env java-component eureka-server-for-spring update -g {} -n {} --environment {} --remove-configurations eureka.server.renewal-percent-threshold eureka.server.enable-self-preservation'.format(
resource_group, eureka_name, env_name), checks=[
JMESPathCheck('name', eureka_name),
JMESPathCheck('properties.componentType', "SpringCloudEureka"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.configurations)', 1)
])
self.cmd(
'containerapp env java-component eureka-server-for-spring update -g {} -n {} --environment {} --replace-configurations eureka.server.expected-client-renewal-interval-seconds=100 eureka.server.response-cache-auto-expiration-in-seconds=100'.format(
resource_group, eureka_name, env_name), checks=[
JMESPathCheck('name', eureka_name),
JMESPathCheck('properties.componentType', "SpringCloudEureka"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.configurations)', 2)
])
self.cmd(
'containerapp env java-component eureka-server-for-spring update -g {} -n {} --environment {} --remove-all-configurations'.format(
resource_group, eureka_name, env_name), checks=[
JMESPathCheck('name', eureka_name),
JMESPathCheck('properties.componentType', "SpringCloudEureka"),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('length(properties.configurations)', 0)
])
self.cmd('containerapp env java-component eureka-server-for-spring delete -g {} -n {} --environment {} --yes'.format(
resource_group, eureka_name, env_name), expect_failure=False)
# List Java Components
java_component_list = self.cmd(
"containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertEqual(len(java_component_list), 0)
|
class ContainerappJavaComponentTests(ScenarioTest):
@ResourceGroupPreparer(location='eastus')
def test_containerapp_java_component(self, resource_group):
pass
@ResourceGroupPreparer(location='eastus')
def test_containerapp_gateway_for_spring_component(self, resource_group):
pass
@ResourceGroupPreparer(location='eastus')
def test_containerapp_java_component_configurations(self, resource_group):
pass
| 7 | 0 | 110 | 13 | 89 | 9 | 2 | 0.1 | 1 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 336 | 40 | 270 | 32 | 263 | 26 | 90 | 29 | 86 | 2 | 1 | 1 | 6 |
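The gateway test feeds --route-yaml files whose top-level key is springCloudGatewayRoutes. A minimal loader sketch, assuming PyYAML is available; load_gateway_routes is illustrative, not the extension's parser:

import yaml

def load_gateway_routes(path):
    # An empty "springCloudGatewayRoutes:" document, as in the update step
    # above, yields zero routes.
    with open(path) as f:
        doc = yaml.safe_load(f) or {}
    return doc.get("springCloudGatewayRoutes") or []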
10,867 | Azure/azure-cli-extensions | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_ingress_sticky_session.py | azext_containerapp.tests.latest.test_containerapp_ingress_sticky_session.ContainerAppIngressStickySessionsTest |
class ContainerAppIngressStickySessionsTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northcentralus")
@live_only() # encounters 'CannotOverwriteExistingCassetteException' only when run from recording (passes when run live)
def test_containerapp_ingress_sticky_sessions_e2e(self, resource_group):
app = self.create_random_name(prefix='app2', length=24)
env_id = prepare_containerapp_env_for_app_e2e_tests(self)
env_rg = parse_resource_id(env_id).get('resource_group')
env_name = parse_resource_id(env_id).get('name')
self.cmd('containerapp env show -n {} -g {}'.format(env_name, env_rg), checks=[
JMESPathCheck('name', env_name)
])
self.cmd("az containerapp create -g {} --target-port 80 --ingress external --image mcr.microsoft.com/k8se/quickstart:latest --environment {} -n {} ".format(resource_group, env_id, app))
self.cmd(
"az containerapp ingress sticky-sessions set -n {} -g {} --affinity sticky".format(app, resource_group))
self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[
JMESPathCheck(
'properties.configuration.ingress.stickySessions.affinity', "sticky"),
])
|
class ContainerAppIngressStickySessionsTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northcentralus")
@live_only()
def test_containerapp_ingress_sticky_sessions_e2e(self, resource_group):
pass
| 5 | 0 | 17 | 4 | 13 | 0 | 1 | 0.06 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 21 | 4 | 17 | 7 | 12 | 1 | 10 | 6 | 8 | 1 | 1 | 0 | 1 |
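The sticky-sessions test recovers the environment's resource group and name with parse_resource_id (importable from msrestazure.tools or azure.mgmt.core.tools, depending on the stack). A small usage example with a made-up ARM ID:

from azure.mgmt.core.tools import parse_resource_id

parts = parse_resource_id(
    "/subscriptions/000/resourceGroups/rg1/providers/"
    "Microsoft.App/managedEnvironments/env1")
assert parts["resource_group"] == "rg1"
assert parts["name"] == "env1"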
10,868 | Azure/azure-cli-extensions | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_telemetry.py | azext_containerapp.tests.latest.test_containerapp_env_telemetry.ContainerappEnvTelemetryScenarioTest |
class ContainerappEnvTelemetryScenarioTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_telemetry_data_dog_e2e(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='containerapp-env', length=24)
data_dog_site = self.create_random_name(prefix='dataDog', length=16)
data_dog_key = self.create_random_name(prefix='dataDog', length=16)
self.cmd(
'containerapp env create -g {} -n {} --logs-destination none'.format(resource_group, env_name))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', env_name),
])
self.cmd(
f'containerapp env telemetry data-dog set -g {resource_group} -n {env_name} --site {data_dog_site} --key {data_dog_key} --enable-open-telemetry-traces true --enable-open-telemetry-metrics true')
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.dataDogConfiguration.site', data_dog_site),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration.destinations[0]', 'dataDog'),
JMESPathCheck(
'properties.openTelemetryConfiguration.metricsConfiguration.destinations[0]', 'dataDog'),
])
self.cmd('containerapp env telemetry data-dog show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('enableOpenTelemetryMetrics', True),
JMESPathCheck('enableOpenTelemetryTraces', True),
JMESPathCheck('key', None),
JMESPathCheck('site', data_dog_site),
])
self.cmd(
f'containerapp env telemetry data-dog set -g {resource_group} -n {env_name} --enable-open-telemetry-metrics false')
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.dataDogConfiguration.site', data_dog_site),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration.destinations[0]', 'dataDog'),
JMESPathCheck(
'properties.openTelemetryConfiguration.metricsConfiguration', None),
])
self.cmd('containerapp env telemetry data-dog show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('enableOpenTelemetryMetrics', False),
JMESPathCheck('enableOpenTelemetryTraces', True),
JMESPathCheck('key', None),
JMESPathCheck('site', data_dog_site),
])
self.cmd(
f'containerapp env telemetry data-dog delete -g {resource_group} -n {env_name} --yes')
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration', None),
JMESPathCheck(
'properties.openTelemetryConfiguration.metricsConfiguration', None),
])
self.cmd(
f'containerapp env telemetry data-dog show -g {resource_group} -n {env_name}', expect_failure=True)
self.cmd(
f'containerapp env delete -g {resource_group} -n {env_name} --yes --no-wait')
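# Hedged aside, not part of the original class: all three telemetry tests
# repeat the poll-until-provisioned loop above. A hypothetical helper that
# could factor it out, mirroring the loop exactly:
def _wait_for_env_provisioned(self, resource_group, env_name, interval=5):
    env = self.cmd('containerapp env show -g {} -n {}'.format(
        resource_group, env_name)).get_output_in_json()
    while env["properties"]["provisioningState"].lower() == "waiting":
        time.sleep(interval)
        env = self.cmd('containerapp env show -g {} -n {}'.format(
            resource_group, env_name)).get_output_in_json()
    return env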
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_telemetry_app_insights_e2e(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='containerapp-env', length=24)
ai_conn_str = f'InstrumentationKey={self.create_random_name(prefix="ik", length=8)};IngestionEndpoint={self.create_random_name(prefix="ie", length=8)};LiveEndpoint={self.create_random_name(prefix="le", length=8)}'
self.cmd(
'containerapp env create -g {} -n {} --logs-destination none'.format(resource_group, env_name))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', env_name),
])
self.cmd(
f'containerapp env telemetry app-insights set -g {resource_group} -n {env_name} --connection-string {ai_conn_str} --enable-open-telemetry-traces true --enable-open-telemetry-logs true')
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration.destinations[0]', 'appInsights'),
JMESPathCheck(
'properties.openTelemetryConfiguration.logsConfiguration.destinations[0]', 'appInsights'),
])
self.cmd('containerapp env telemetry app-insights show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('enableOpenTelemetryLogs', True),
JMESPathCheck('enableOpenTelemetryTraces', True),
JMESPathCheck('connectionString', None),
])
self.cmd(
f'containerapp env telemetry app-insights set -g {resource_group} -n {env_name} --enable-open-telemetry-traces false')
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration', None),
JMESPathCheck(
'properties.openTelemetryConfiguration.logsConfiguration.destinations[0]', 'appInsights'),
])
self.cmd('containerapp env telemetry app-insights show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('enableOpenTelemetryLogs', True),
JMESPathCheck('enableOpenTelemetryTraces', False),
JMESPathCheck('connectionString', None),
])
self.cmd(
f'containerapp env telemetry app-insights delete -g {resource_group} -n {env_name} --yes')
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration', None),
JMESPathCheck(
'properties.openTelemetryConfiguration.metricsConfiguration', None),
])
self.cmd(
f'containerapp env telemetry app-insights show -g {resource_group} -n {env_name}', expect_failure=True)
self.cmd(
f'containerapp env delete -g {resource_group} -n {env_name} --yes --no-wait')
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_telemetry_otlp_e2e(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='containerapp-env', length=24)
self.cmd(
'containerapp env create -g {} -n {} --logs-destination none'.format(resource_group, env_name))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', env_name),
])
otlp_name = "newrelic"
otlp_endpoint = "otlp.nr-data.net:4317"
otlp_insecure = False
otlp_headers = "api-key=test"
otlp_incorrect_headers = "key"
self.cmd(f'containerapp env telemetry otlp add -g {resource_group} -n {env_name} --otlp-name {otlp_name} --endpoint {otlp_endpoint} --insecure {otlp_insecure} --headers {otlp_incorrect_headers} --enable-open-telemetry-traces true --enable-open-telemetry-logs true --enable-open-telemetry-metrics true', expect_failure=True)
self.cmd(f'containerapp env telemetry otlp add -g {resource_group} -n {env_name} --otlp-name {otlp_name} --endpoint {otlp_endpoint} --insecure {otlp_insecure} --headers {otlp_headers} --enable-open-telemetry-traces true --enable-open-telemetry-logs true --enable-open-telemetry-metrics true')
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[0].name', otlp_name),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[0].endpoint', otlp_endpoint),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration.destinations[0]', otlp_name),
JMESPathCheck(
'properties.openTelemetryConfiguration.logsConfiguration.destinations[0]', otlp_name),
JMESPathCheck(
'properties.openTelemetryConfiguration.metricsConfiguration.destinations[0]', otlp_name),
])
self.cmd('containerapp env telemetry otlp show -n {} -g {} --otlp-name {}'.format(env_name, resource_group, otlp_name), checks=[
JMESPathCheck('name', otlp_name),
JMESPathCheck('endpoint', otlp_endpoint),
JMESPathCheck('enableOpenTelemetryMetrics', True),
JMESPathCheck('enableOpenTelemetryTraces', True),
JMESPathCheck('enableOpenTelemetryLogs', True),
])
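# Add a second OTLP destination that only receives traces.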
otlp_name_test = "testotlp"
otlp_endpoint_test = "otlp.net:4318"
otlp_insecure_test = False
otlp_headers_test = "api-key=test"
self.cmd(
f'containerapp env telemetry otlp add -g {resource_group} -n {env_name} --otlp-name {otlp_name_test} --endpoint {otlp_endpoint_test} --insecure {otlp_insecure_test} --headers {otlp_headers_test} --enable-open-telemetry-traces true')
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[0].name', otlp_name),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[0].endpoint', otlp_endpoint),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[1].name', otlp_name_test),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[1].endpoint', otlp_endpoint_test),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration.destinations[0]', otlp_name),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration.destinations[1]', otlp_name_test),
])
self.cmd('containerapp env telemetry otlp show -n {} -g {} --otlp-name {}'.format(env_name, resource_group, otlp_name_test), checks=[
JMESPathCheck('name', otlp_name_test),
JMESPathCheck('endpoint', otlp_endpoint_test),
JMESPathCheck('enableOpenTelemetryMetrics', False),
JMESPathCheck('enableOpenTelemetryTraces', True),
JMESPathCheck('enableOpenTelemetryLogs', False),
])
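# Re-point the first destination at a new endpoint and stop sending traces to it.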
otlp_endpoint_update = "otlp.nr-dataupdate.net:4317"
self.cmd(
f'containerapp env telemetry otlp update -g {resource_group} -n {env_name} --otlp-name {otlp_name} --endpoint {otlp_endpoint_update} --enable-open-telemetry-traces false')
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[0].name', otlp_name),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[0].endpoint', otlp_endpoint_update),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[1].name', otlp_name_test),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[1].endpoint', otlp_endpoint_test),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration.destinations[0]', otlp_name_test),
JMESPathCheck(
'properties.openTelemetryConfiguration.logsConfiguration.destinations[0]', otlp_name),
JMESPathCheck(
'properties.openTelemetryConfiguration.metricsConfiguration.destinations[0]', otlp_name),
])
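# Remove the traces-only destination; the traces configuration should be cleared with it.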
self.cmd(
f'containerapp env telemetry otlp remove -g {resource_group} -n {env_name} --otlp-name {otlp_name_test} --yes')
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[0].name', otlp_name),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[0].endpoint', otlp_endpoint_update),
JMESPathCheck(
'properties.openTelemetryConfiguration.destinationsConfiguration.otlpConfigurations[1]', None),
JMESPathCheck(
'properties.openTelemetryConfiguration.tracesConfiguration', None),
JMESPathCheck(
'properties.openTelemetryConfiguration.logsConfiguration.destinations[0]', otlp_name),
JMESPathCheck(
'properties.openTelemetryConfiguration.metricsConfiguration.destinations[0]', otlp_name),
])
self.cmd(
f'containerapp env delete -g {resource_group} -n {env_name} --yes --no-wait')
|
class ContainerappEnvTelemetryScenarioTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_telemetry_data_dog_e2e(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_telemetry_app_insights_e2e(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_telemetry_otlp_e2e(self, resource_group):
pass
| 10 | 0 | 92 | 19 | 73 | 0 | 5 | 0 | 1 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 288 | 62 | 226 | 26 | 216 | 0 | 113 | 23 | 109 | 6 | 1 | 1 | 16 |
10869 |
Azure/azure-cli-extensions
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_http_route_config.py
|
azext_containerapp.tests.latest.test_containerapp_env_http_route_config.ContainerAppEnvHttpRouteConfigTest
|
class ContainerAppEnvHttpRouteConfigTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus")
def test_containerapp_env_http_route_config_crudoperations_e2e(self, resource_group):
app1 = self.create_random_name(prefix='routed1', length=24)
app2 = self.create_random_name(prefix='routed2', length=24)
containerapp_yaml_text = f"""
location: {TEST_LOCATION}
type: Microsoft.App/containerApps
properties:
  configuration:
    activeRevisionsMode: Single
    ingress:
      external: false
      allowInsecure: false
      targetPort: 80
  template:
    revisionSuffix: myrevision
    containers:
      - image: nginx
        name: nginx
        env:
          - name: HTTP_PORT
            value: 80
        command:
          - npm
          - start
        resources:
          cpu: 0.5
          memory: 1Gi
    scale:
      minReplicas: 1
      maxReplicas: 1
"""
containerapp_file_name = os.path.join(
TEST_DIR, f"{self._testMethodName}_containerapp.yml")
write_test_file(containerapp_file_name, containerapp_yaml_text)
http_route_config1_yaml_text = f"""
rules:
  - description: "rule 1"
    routes:
      - match:
          prefix: "/1"
        action:
          PrefixRewrite: "/"
    targets:
      - ContainerApp: "{app1}"
"""
http_route_config1_file_name = os.path.join(
TEST_DIR, f"{self._testMethodName}_http_route_config1.yml")
write_test_file(http_route_config1_file_name,
http_route_config1_yaml_text)
http_route_config2_yaml_text = f"""
rules:
  - description: "rule 2"
    routes:
      - match:
          prefix: "/2"
        action:
          PrefixRewrite: "/"
    targets:
      - ContainerApp: "{app2}"
"""
http_route_config2_file_name = os.path.join(
TEST_DIR, f"{self._testMethodName}_http_route_config2.yml")
write_test_file(http_route_config2_file_name,
http_route_config2_yaml_text)
self.cmd(f'configure --defaults location={TEST_LOCATION}')
env_name = self.create_random_name(
prefix='aca-http-route-config-env', length=30)
self.cmd(
f'containerapp env create -g {resource_group} -n {env_name} --location {TEST_LOCATION} --logs-destination none --enable-workload-profiles')
self.cmd(f"az containerapp env http-route-config list -g {resource_group} -n {env_name}", checks=[
JMESPathCheck('length(@)', 0),
])
route_name = "route1"
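# Create the route config before the target apps exist; provisioning should surface resolution errors.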
self.cmd(f"az containerapp env http-route-config create -g {resource_group} -n {env_name} -r {route_name} --yaml {http_route_config1_file_name}", checks=[
JMESPathCheck('properties.provisioningState',
"SucceededWithErrors"),
JMESPathCheck('properties.provisioningErrors[0].message',
f"error when trying to get containerapp {app1} from cluster. error ContainerApp.k8se.microsoft.com \"{app1}\" not found"),
# Not deployed yet
# JMESPathCheck('properties.rules[0].description', "rule 1"),
JMESPathCheck('properties.rules[0].routes[0].match.prefix', "/1"),
JMESPathCheck(
'properties.rules[0].routes[0].action.prefixRewrite', "/"),
JMESPathCheck('properties.rules[0].targets[0].containerApp', app1),
])
self.cmd(f"az containerapp env http-route-config show -g {resource_group} -n {env_name} -r {route_name}", checks=[
JMESPathCheck('properties.provisioningState',
"SucceededWithErrors"),
JMESPathCheck('properties.provisioningErrors[0].message',
f"error when trying to get containerapp {app1} from cluster. error ContainerApp.k8se.microsoft.com \"{app1}\" not found"),
# Not deployed yet
# JMESPathCheck('properties.rules[0].description', "rule 1"),
JMESPathCheck('properties.rules[0].routes[0].match.prefix', "/1"),
JMESPathCheck(
'properties.rules[0].routes[0].action.prefixRewrite', "/"),
JMESPathCheck('properties.rules[0].targets[0].containerApp', app1),
])
self.cmd(f"az containerapp env http-route-config list -g {resource_group} -n {env_name}", checks=[
JMESPathCheck('[0].properties.provisioningState',
"SucceededWithErrors"),
JMESPathCheck('[0].properties.provisioningErrors[0].message',
f"error when trying to get containerapp {app1} from cluster. error ContainerApp.k8se.microsoft.com \"{app1}\" not found"),
# Not deployed yet
# JMESPathCheck('[0].properties.rules[0].description', "rule 1"),
JMESPathCheck(
'[0].properties.rules[0].routes[0].match.prefix', "/1"),
JMESPathCheck(
'[0].properties.rules[0].routes[0].action.prefixRewrite', "/"),
JMESPathCheck(
'[0].properties.rules[0].targets[0].containerApp', app1),
])
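# Deploy the target apps so the route config can resolve them and provisioning succeeds.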
self.cmd(
f'containerapp create -n {app1} -g {resource_group} --environment {env_name} --yaml {containerapp_file_name}')
self.cmd(f'containerapp show -g {resource_group} -n {app1}', checks=[
JMESPathCheck("properties.provisioningState", "Succeeded"),
])
self.cmd(
f'containerapp create -n {app2} -g {resource_group} --environment {env_name} --yaml {containerapp_file_name}')
self.cmd(f'containerapp show -g {resource_group} -n {app2}', checks=[
JMESPathCheck("properties.provisioningState", "Succeeded"),
])
self.cmd(f"az containerapp env http-route-config show -g {resource_group} -n {env_name} -r {route_name}", checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
# Not deployed yet
# JMESPathCheck('properties.rules[0].description', "rule 1"),
JMESPathCheck('properties.rules[0].routes[0].match.prefix', "/1"),
JMESPathCheck(
'properties.rules[0].routes[0].action.prefixRewrite', "/"),
JMESPathCheck('properties.rules[0].targets[0].containerApp', app1),
])
self.cmd(f"az containerapp env http-route-config update -g {resource_group} -n {env_name} -r {route_name} --yaml {http_route_config2_file_name}", checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
# Not deployed yet
# JMESPathCheck('properties.rules[0].description', "rule 2"),
JMESPathCheck('properties.rules[0].routes[0].match.prefix', "/2"),
JMESPathCheck(
'properties.rules[0].routes[0].action.prefixRewrite', "/"),
JMESPathCheck('properties.rules[0].targets[0].containerApp', app2),
])
self.cmd(
f"az containerapp env http-route-config delete -g {resource_group} -n {env_name} -r {route_name} -y")
self.cmd(f"az containerapp env http-route-config list -g {resource_group} -n {env_name}", checks=[
JMESPathCheck('length(@)', 0),
])
self.cmd(f'containerapp delete -g {resource_group} -n {app1} -y')
self.cmd(f'containerapp delete -g {resource_group} -n {app2} -y')
self.cmd(
f'containerapp env delete -g {resource_group} -n {env_name} -y')
clean_up_test_file(http_route_config1_file_name)
clean_up_test_file(http_route_config2_file_name)
clean_up_test_file(containerapp_file_name)
|
class ContainerAppEnvHttpRouteConfigTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus")
def test_containerapp_env_http_route_config_crudoperations_e2e(self, resource_group):
pass
| 4 | 0 | 146 | 19 | 117 | 10 | 1 | 0.08 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 149 | 19 | 120 | 13 | 116 | 10 | 35 | 12 | 33 | 1 | 1 | 0 | 1 |
10870 |
Azure/azure-cli-extensions
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_commands.py
|
azext_containerapp.tests.latest.test_containerapp_env_commands.ContainerappEnvScenarioTest
|
class ContainerappEnvScenarioTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_e2e(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
logs_workspace_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {} -l eastus'.format(
resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(
resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(
resource_group, env_name, logs_workspace_id, logs_workspace_key))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', env_name),
])
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
])
self.cmd(
'containerapp env delete -g {} -n {} --yes'.format(resource_group, env_name))
self.cmd('containerapp env list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 0),
])
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="australiaeast")
def test_containerapp_env_la_dynamic_json(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
logs_workspace_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {} -l eastus'.format(
resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(
resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]
default_env_name = self.create_random_name(
prefix='containerapp-env', length=24)
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} --logs-destination log-analytics -j'.format(resource_group, default_env_name, logs_workspace_id, logs_workspace_key), checks=[
JMESPathCheck('name', default_env_name),
JMESPathCheck(
'properties.appLogsConfiguration.destination', "log-analytics"),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.dynamicJsonColumns', True),
])
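# An explicit -j false on create should disable dynamic JSON columns.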
default_env_name2 = self.create_random_name(
prefix='containerapp-env', length=24)
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} -j false'.format(resource_group, default_env_name2, logs_workspace_id, logs_workspace_key), checks=[
JMESPathCheck('name', default_env_name2),
JMESPathCheck(
'properties.appLogsConfiguration.destination', "log-analytics"),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.dynamicJsonColumns', False),
])
env_name = self.create_random_name(
prefix='containerapp-env', length=24)
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} --logs-destination log-analytics'.format(
resource_group, env_name, logs_workspace_id, logs_workspace_key))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.appLogsConfiguration.destination', "log-analytics"),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.customerId', logs_workspace_id),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.dynamicJsonColumns', False),
])
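# A bare -j on update should turn dynamic JSON columns on.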
self.cmd(
'containerapp env update -g {} -n {} -j'.format(resource_group, env_name))
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.appLogsConfiguration.destination', "log-analytics"),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.customerId', logs_workspace_id),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.dynamicJsonColumns', True),
])
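# An update that omits -j should leave dynamicJsonColumns unchanged.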
self.cmd(
'containerapp env update -g {} -n {}'.format(resource_group, env_name))
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.appLogsConfiguration.destination', "log-analytics"),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.customerId', logs_workspace_id),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.dynamicJsonColumns', True),
])
self.cmd(
'containerapp env update -g {} -n {} -j false'.format(resource_group, env_name))
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.appLogsConfiguration.destination', "log-analytics"),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.customerId', logs_workspace_id),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.dynamicJsonColumns', False),
])
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
@live_only() # encounters 'CannotOverwriteExistingCassetteException' only when run from recording (passes when run live)
def test_containerapp_env_dapr_components(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
dapr_comp_name = self.create_random_name(
prefix='dapr-component', length=24)
logs_workspace_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {} -l eastus'.format(
resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(
resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(
resource_group, env_name, logs_workspace_id, logs_workspace_key))
import tempfile
file_ref, dapr_file = tempfile.mkstemp(suffix=".yml")
dapr_yaml = """
name: statestore
componentType: state.azure.blobstorage
version: v1
metadata:
- name: accountName
  secretRef: storage-account-name
secrets:
- name: storage-account-name
  value: storage-account-name
"""
daprloaded = yaml.safe_load(dapr_yaml)
with open(dapr_file, 'w') as outfile:
yaml.dump(daprloaded, outfile, default_flow_style=False)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env dapr-component set -n {} -g {} --dapr-component-name {} --yaml {}'.format(env_name, resource_group, dapr_comp_name, dapr_file.replace(os.sep, os.sep + os.sep)), checks=[
JMESPathCheck('name', dapr_comp_name),
])
os.close(file_ref)
self.cmd('containerapp env dapr-component list -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', dapr_comp_name),
])
self.cmd('containerapp env dapr-component show -n {} -g {} --dapr-component-name {}'.format(env_name, resource_group, dapr_comp_name), checks=[
JMESPathCheck('name', dapr_comp_name),
JMESPathCheck('properties.version', 'v1'),
JMESPathCheck(
'properties.secrets[0].name', 'storage-account-name'),
JMESPathCheck('properties.metadata[0].name', 'accountName'),
JMESPathCheck(
'properties.metadata[0].secretRef', 'storage-account-name'),
])
self.cmd('containerapp env dapr-component remove -n {} -g {} --dapr-component-name {}'.format(
env_name, resource_group, dapr_comp_name))
self.cmd('containerapp env dapr-component list -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('length(@)', 0),
])
# Invalid pubsub service type should throw an error.
self.cmd('containerapp env dapr-component init -n {} -g {} --pubsub {}'.format(
env_name, resource_group, "invalid1"), expect_failure=True)
# Invalid statestore service type should throw an error.
self.cmd('containerapp env dapr-component init -n {} -g {} --statestore {}'.format(
env_name, resource_group, "invalid2"), expect_failure=True)
# Should create Redis statestore and pubsub components by default.
output_json = self.cmd('containerapp env dapr-component init -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('message', "Operation successful."),
# Redis statestore and pubsub components
JMESPathCheck('length(resources.daprComponents)', 2),
JMESPathCheck('length(resources.devServices)',
1), # Single Redis instance
]).get_output_in_json()
self.assertIn("daprComponents/statestore",
output_json["resources"]["daprComponents"][0])
self.assertIn("daprComponents/pubsub",
output_json["resources"]["daprComponents"][1])
self.assertIn("containerapps/dapr-redis",
output_json["resources"]["devServices"][0])
# Should not recreate the Redis statestore and pubsub components if they already exist.
output_json = self.cmd('containerapp env dapr-component init -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('message', "Operation successful."),
# Redis statestore and pubsub components
JMESPathCheck('length(resources.daprComponents)', 2),
JMESPathCheck('length(resources.devServices)',
1), # Single Redis instance
]).get_output_in_json()
self.assertIn("daprComponents/statestore",
output_json["resources"]["daprComponents"][0])
self.assertIn("daprComponents/pubsub",
output_json["resources"]["daprComponents"][1])
self.assertIn("containerapps/dapr-redis",
output_json["resources"]["devServices"][0])
# Redis statestore should be correctly created.
self.cmd('containerapp env dapr-component show --dapr-component-name {} -n {} -g {}'.format("statestore", env_name, resource_group), checks=[
JMESPathCheck('name', "statestore"),
JMESPathCheck('properties.componentType', "state.redis"),
JMESPathCheck('length(properties.metadata)', 1),
JMESPathCheck('properties.metadata[0].name', "actorStateStore"),
JMESPathCheck('properties.metadata[0].value', "true"),
JMESPathCheck(
'properties.serviceComponentBind.name', "dapr-redis"),
JMESPathCheck('properties.serviceComponentBind.serviceId',
output_json["resources"]["devServices"][0]),
JMESPathCheck('properties.serviceComponentBind.metadata.DCI_SB_CREATED_BY',
"azcli_azext_containerapp_daprutils"),
JMESPathCheck('properties.version', "v1"),
])
# Redis pubsub should be correctly created.
self.cmd('containerapp env dapr-component show --dapr-component-name {} -n {} -g {}'.format("pubsub", env_name, resource_group), checks=[
JMESPathCheck('name', "pubsub"),
JMESPathCheck('properties.componentType', "pubsub.redis"),
JMESPathCheck('length(properties.metadata)', 0),
JMESPathCheck(
'properties.serviceComponentBind.name', "dapr-redis"),
JMESPathCheck('properties.serviceComponentBind.serviceId',
output_json["resources"]["devServices"][0]),
JMESPathCheck('properties.serviceComponentBind.metadata.DCI_SB_CREATED_BY',
"azcli_azext_containerapp_daprutils"),
JMESPathCheck('properties.version', "v1"),
])
@ResourceGroupPreparer(location="eastus")
@SubnetPreparer(location="centralus", vnet_address_prefixes='14.0.0.0/23', delegations='Microsoft.App/environments', subnet_address_prefixes='14.0.0.0/23')
def test_containerapp_env_infrastructure_rg(self, resource_group, subnet_id):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env = self.create_random_name(prefix='env', length=24)
infra_rg = self.create_random_name(prefix='irg', length=24)
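# A custom infrastructure resource group name is a workload-profiles feature, hence --enable-workload-profiles.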
self.cmd(
f'containerapp env create -g {resource_group} -n {env} -s {subnet_id} -i {infra_rg} --enable-workload-profiles true --logs-destination none')
containerapp_env = self.cmd(
f'containerapp env show -g {resource_group} -n {env}').get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
f'containerapp env show -g {resource_group} -n {env}').get_output_in_json()
self.cmd(f'containerapp env show -n {env} -g {resource_group}', checks=[
JMESPathCheck('name', env),
JMESPathCheck('properties.infrastructureResourceGroup', infra_rg),
])
self.cmd(
f'containerapp env delete -n {env} -g {resource_group} --yes --no-wait')
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_mtls(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
logs_workspace_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {} -l eastus'.format(
resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(
resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} --enable-mtls'.format(
resource_group, env_name, logs_workspace_id, logs_workspace_key))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.peerAuthentication.mtls.enabled', True),
])
self.cmd(
'containerapp env update -g {} -n {} --enable-mtls false'.format(resource_group, env_name))
# refresh state after the update so the wait loop checks current data
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.peerAuthentication.mtls.enabled', False),
])
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_p2p_traffic_encryption(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
logs_workspace_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {} -l eastus'.format(
resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(
resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]
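# Specifying both --enable-peer-to-peer-encryption and --enable-mtls in one call should be rejected.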
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} --enable-peer-to-peer-encryption false --enable-mtls'
.format(resource_group, env_name, logs_workspace_id, logs_workspace_key), expect_failure=True)
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} --enable-peer-to-peer-encryption'
.format(resource_group, env_name, logs_workspace_id, logs_workspace_key))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.peerTrafficConfiguration.encryption.enabled', True),
])
self.cmd('containerapp env update -g {} -n {} --enable-peer-to-peer-encryption false'.format(resource_group, env_name))
# refresh state after the update so the wait loop checks current data
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.peerTrafficConfiguration.encryption.enabled', False),
])
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_dapr_connection_string_extension(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
self.cmd('containerapp env create -g {} -n {} --logs-destination none -d "Endpoint=https://foo.azconfig.io;Id=osOX-l9-s0:sig;InstrumentationKey=00000000000000000000000000000000000000000000"'.format(resource_group, env_name), expect_failure=False)
self.cmd('containerapp env delete -g {} -n {} --yes --no-wait'.format(
resource_group, env_name), expect_failure=False)
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_usages(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
result = self.cmd('containerapp list-usages').get_output_in_json()
usages = result["value"]
self.assertEqual(len(usages), 1)
self.assertEqual(usages[0]["name"]["value"], "ManagedEnvironmentCount")
self.assertGreater(usages[0]["limit"], 0)
self.assertGreaterEqual(usages[0]["usage"], 0)
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
logs_workspace_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {} -l eastus'.format(
resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(
resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} --enable-mtls'.format(
resource_group, env_name, logs_workspace_id, logs_workspace_key))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name)
])
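# Environment-scoped usage is queried via the environment resource id.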
result = self.cmd(
'containerapp env list-usages --id {}'.format(containerapp_env["id"])).get_output_in_json()
usages = result["value"]
self.assertEqual(len(usages), 4)
self.assertGreater(usages[0]["limit"], 0)
self.assertGreaterEqual(usages[0]["usage"], 0)
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_public_network_access(self, resource_group):
location = TEST_LOCATION
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
self.cmd(
'containerapp env create -g {} -n {} --logs-destination none'.format(resource_group, env_name))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('properties.publicNetworkAccess', 'Enabled'),
])
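# Disabling public network access via update should be reflected in the response.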
self.cmd('containerapp env update -g {} -n {} --public-network-access Disabled'.format(
resource_group, env_name),
checks=[
JMESPathCheck(
'properties.publicNetworkAccess', 'Disabled'),
])
self.cmd(
'containerapp env delete -g {} -n {} -y --no-wait'.format(resource_group, env_name))
enabled_env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
self.cmd('containerapp env create -g {} -n {} --public-network-access Disabled --logs-destination none'.format(
resource_group, enabled_env_name),
checks=[
JMESPathCheck(
'properties.publicNetworkAccess', 'Disabled'),
])
self.cmd(
'containerapp env delete -g {} -n {} -y --no-wait'.format(resource_group, enabled_env_name))
|
class ContainerappEnvScenarioTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_e2e(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="australiaeast")
def test_containerapp_env_la_dynamic_json(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
@live_only()
def test_containerapp_env_dapr_components(self, resource_group):
pass
@ResourceGroupPreparer(location="eastus")
@SubnetPreparer(location="centralus", vnet_address_prefixes='14.0.0.0/23', delegations='Microsoft.App/environments', subnet_address_prefixes='14.0.0.0/23')
def test_containerapp_env_infrastructure_rg(self, resource_group, subnet_id):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_mtls(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_p2p_traffic_encryption(self, resource_group):
pass
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_dapr_connection_string_extension(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_usages(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_public_network_access(self, resource_group):
pass
| 28 | 0 | 42 | 10 | 31 | 1 | 2 | 0.04 | 1 | 0 | 0 | 0 | 9 | 0 | 9 | 9 | 405 | 98 | 301 | 68 | 272 | 11 | 163 | 58 | 152 | 3 | 1 | 1 | 19 |
10871 |
Azure/azure-cli-extensions
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_commands.py
|
azext_containerapp.tests.latest.test_containerapp_env_commands.ContainerappEnvLocationNotInStageScenarioTest
|
class ContainerappEnvLocationNotInStageScenarioTest(ScenarioTest):
def __init__(self, *arg, **kwargs):
super().__init__(*arg, random_config_dir=True, **kwargs)
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="australiaeast")
def test_containerapp_env_logs_e2e(self, resource_group):
# azure-monitor is not available in North Central US (Stage); if TEST_LOCATION is "northcentralusstage", fall back to eastus
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {} -l eastus'.format(
resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(
resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]
# create env with log-analytics
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} --logs-destination log-analytics'.format(
resource_group, env_name, logs_workspace_id, logs_workspace_key))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.appLogsConfiguration.destination', "log-analytics"),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.customerId', logs_workspace_id),
])
# update env log destination to none
self.cmd('containerapp env update -g {} -n {} --logs-destination none'.format(resource_group, env_name), checks=[
JMESPathCheck('properties.appLogsConfiguration.destination', None),
])
# update env log destination from none back to log-analytics
self.cmd('containerapp env update -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} --logs-destination log-analytics'.format(resource_group, env_name, logs_workspace_id, logs_workspace_key), checks=[
JMESPathCheck(
'properties.appLogsConfiguration.destination', "log-analytics"),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.customerId', logs_workspace_id),
])
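# The azure-monitor destination needs a storage account to receive the diagnostic logs.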
storage_account_name = self.create_random_name(
prefix='cappstorage', length=24)
storage_account = self.cmd('storage account create -g {} -n {} --https-only'.format(
resource_group, storage_account_name)).get_output_in_json()["id"]
# update env log destination from log-analytics to azure-monitor
self.cmd('containerapp env update -g {} -n {} --logs-destination azure-monitor --storage-account {}'.format(
resource_group, env_name, storage_account))
env = self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.appLogsConfiguration.destination', "azure-monitor"),
]).get_output_in_json()
diagnostic_settings = self.cmd(
'monitor diagnostic-settings show --name diagnosticsettings --resource {}'.format(env["id"])).get_output_in_json()
self.assertEqual(
storage_account in diagnostic_settings["storageAccountId"], True)
# update env log destination from azure-monitor to none
self.cmd(
'containerapp env update -g {} -n {} --logs-destination none'.format(resource_group, env_name))
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.appLogsConfiguration.destination', None),
])
# update env log destination from none to log-analytics
self.cmd('containerapp env update -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} --logs-destination log-analytics'.format(
resource_group, env_name, logs_workspace_id, logs_workspace_key))
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.appLogsConfiguration.destination', "log-analytics"),
JMESPathCheck(
'properties.appLogsConfiguration.logAnalyticsConfiguration.customerId', logs_workspace_id),
])
self.cmd('containerapp env create -g {} -n {} --logs-destination azure-monitor --storage-account {}'.format(
resource_group, env_name, storage_account))
env = self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.appLogsConfiguration.destination', "azure-monitor"),
]).get_output_in_json()
diagnostic_settings = self.cmd(
'monitor diagnostic-settings show --name diagnosticsettings --resource {}'.format(env["id"])).get_output_in_json()
self.assertEqual(
storage_account in diagnostic_settings["storageAccountId"], True)
self.cmd(
'containerapp env create -g {} -n {} --logs-destination none'.format(resource_group, env_name))
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.appLogsConfiguration.destination', None),
])
self.cmd(
'containerapp env update -g {} -n {} --logs-destination none --no-wait'.format(resource_group, env_name))
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
JMESPathCheck('properties.appLogsConfiguration.destination', None),
])
@AllowLargeResponse(8192)
@live_only() # encounters 'CannotOverwriteExistingCassetteException' only when run from recording (passes when run live)
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_env_custom_domains(self, resource_group):
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {} -l eastus'.format(
resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(
resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(
resource_group, env_name, logs_workspace_id, logs_workspace_key))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
# create an App service domain and update its txt records
contacts = os.path.join(TEST_DIR, 'domain-contact.json')
zone_name = "{}.com".format(env_name)
subdomain_1 = "devtest"
subdomain_2 = "clitest"
txt_name_1 = "asuid.{}".format(subdomain_1)
txt_name_2 = "asuid.{}".format(subdomain_2)
hostname_1 = "{}.{}".format(subdomain_1, zone_name)
hostname_2 = "{}.{}".format(subdomain_2, zone_name)
verification_id = containerapp_env["properties"]["customDomainConfiguration"]["customDomainVerificationId"]
self.cmd("appservice domain create -g {} --hostname {} --contact-info=@'{}' --accept-terms".format(
resource_group, zone_name, contacts)).get_output_in_json()
self.cmd('network dns record-set txt add-record -g {} -z {} -n {} -v {}'.format(
resource_group, zone_name, txt_name_1, verification_id)).get_output_in_json()
self.cmd('network dns record-set txt add-record -g {} -z {} -n {} -v {}'.format(
resource_group, zone_name, txt_name_2, verification_id)).get_output_in_json()
# upload cert, add hostname & binding
pfx_file = os.path.join(TEST_DIR, 'cert.pfx')
pfx_password = 'test12'
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {} --dns-suffix {} --certificate-file "{}" --certificate-password {}'.format(
resource_group, env_name, logs_workspace_id, logs_workspace_key, hostname_1, pfx_file, pfx_password))
self.cmd(f'containerapp env show -n {env_name} -g {resource_group}', checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.customDomainConfiguration.dnsSuffix', hostname_1),
])
@AllowLargeResponse(8192)
@live_only() # encounters 'CannotOverwriteExistingCassetteException' only when run from recording (passes when run live)
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_env_update_custom_domains(self, resource_group):
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {} -l eastus'.format(
resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(
resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]
self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(
resource_group, env_name, logs_workspace_id, logs_workspace_key))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
# create an App service domain and update its txt records
contacts = os.path.join(TEST_DIR, 'domain-contact.json')
zone_name = "{}.com".format(env_name)
subdomain_1 = "devtest"
subdomain_2 = "clitest"
txt_name_1 = "asuid.{}".format(subdomain_1)
txt_name_2 = "asuid.{}".format(subdomain_2)
hostname_1 = "{}.{}".format(subdomain_1, zone_name)
hostname_2 = "{}.{}".format(subdomain_2, zone_name)
verification_id = containerapp_env["properties"]["customDomainConfiguration"]["customDomainVerificationId"]
self.cmd("appservice domain create -g {} --hostname {} --contact-info=@'{}' --accept-terms".format(
resource_group, zone_name, contacts)).get_output_in_json()
self.cmd('network dns record-set txt add-record -g {} -z {} -n {} -v {}'.format(
resource_group, zone_name, txt_name_1, verification_id)).get_output_in_json()
self.cmd('network dns record-set txt add-record -g {} -z {} -n {} -v {}'.format(
resource_group, zone_name, txt_name_2, verification_id)).get_output_in_json()
# upload cert, add hostname & binding
pfx_file = os.path.join(TEST_DIR, 'cert.pfx')
pfx_password = 'test12'
self.cmd('containerapp env update -g {} -n {} --dns-suffix {} --certificate-file "{}" --certificate-password {}'.format(
resource_group, env_name, hostname_1, pfx_file, pfx_password))
self.cmd(f'containerapp env show -n {env_name} -g {resource_group}', checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.customDomainConfiguration.dnsSuffix', hostname_1),
])
self.cmd('containerapp env update -g {} -n {} --dns-suffix {}'.format(
resource_group, env_name, hostname_2))
self.cmd(f'containerapp env show -n {env_name} -g {resource_group}', checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.customDomainConfiguration.dnsSuffix', hostname_2),
])
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
@live_only() # passes live but hits CannotOverwriteExistingCassetteException when run from recording
def test_containerapp_env_internal_only_e2e(self, resource_group):
# network is not available in North Central US (Stage); if TEST_LOCATION is "northcentralusstage", fall back to eastus
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env = self.create_random_name(prefix='env', length=24)
logs = self.create_random_name(prefix='logs', length=24)
vnet = self.create_random_name(prefix='name', length=24)
self.cmd(
f"az network vnet create --address-prefixes '14.0.0.0/23' -g {resource_group} -n {vnet}")
sub_id = self.cmd(
f"az network vnet subnet create --address-prefixes '14.0.0.0/23' --delegations Microsoft.App/environments -n sub -g {resource_group} --vnet-name {vnet}").get_output_in_json()["id"]
logs_id = self.cmd(
f"monitor log-analytics workspace create -g {resource_group} -n {logs} -l eastus").get_output_in_json()["customerId"]
logs_key = self.cmd(
f'monitor log-analytics workspace get-shared-keys -g {resource_group} -n {logs}').get_output_in_json()["primarySharedKey"]
self.cmd(
f'containerapp env create -g {resource_group} -n {env} --logs-workspace-id {logs_id} --logs-workspace-key {logs_key} --internal-only -s {sub_id}')
containerapp_env = self.cmd(
f'containerapp env show -g {resource_group} -n {env}').get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
f'containerapp env show -g {resource_group} -n {env}').get_output_in_json()
self.cmd(f'containerapp env show -n {env} -g {resource_group}', checks=[
JMESPathCheck('name', env),
JMESPathCheck('properties.vnetConfiguration.internal', True),
])
@AllowLargeResponse(8192)
@live_only() # encounters 'CannotOverwriteExistingCassetteException' only when run from recording (passes when run live)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_certificate_e2e(self, resource_group):
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
logs_workspace_name = self.create_random_name(
prefix='containerapp-env', length=24)
logs_workspace_id = self.cmd(
'monitor log-analytics workspace create -g {} -n {} -l eastus'.format(resource_group,
logs_workspace_name)).get_output_in_json()[
"customerId"]
logs_workspace_key = self.cmd(
'monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(resource_group,
logs_workspace_name)).get_output_in_json()[
"primarySharedKey"]
self.cmd(
'containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(resource_group,
env_name,
logs_workspace_id,
logs_workspace_key))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env certificate list -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('length(@)', 0),
])
# test that files other than .pfx or .pem are not supported
txt_file = os.path.join(TEST_DIR, 'cert.txt')
self.cmd(
'containerapp env certificate upload -g {} -n {} --certificate-file "{}"'.format(resource_group, env_name,
txt_file),
expect_failure=True)
# test pfx file with password
pfx_file = os.path.join(TEST_DIR, 'cert.pfx')
pfx_password = 'test12'
cert = self.cmd('containerapp env certificate upload -g {} -n {} --certificate-file "{}" --password {}'.format(
resource_group, env_name, pfx_file, pfx_password), checks=[
JMESPathCheck(
'type', "Microsoft.App/managedEnvironments/certificates"),
]).get_output_in_json()
cert_name = cert["name"]
cert_id = cert["id"]
cert_thumbprint = cert["properties"]["thumbprint"]
cert_location = cert["location"]
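# Listing by the certificate's location should return exactly the uploaded cert.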
self.cmd(
'containerapp env certificate list -n {} -g {} -l "{}"'.format(
env_name, resource_group, cert_location),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].properties.thumbprint', cert_thumbprint),
JMESPathCheck('[0].name', cert_name),
JMESPathCheck('[0].id', cert_id),
])
# uploading the pfx again without its password should fail
self.cmd(
'containerapp env certificate upload -g {} -n {} --certificate-file "{}"'.format(resource_group, env_name,
pfx_file),
expect_failure=True)
self.cmd('containerapp env certificate list -n {} -g {} --certificate {}'.format(env_name, resource_group,
cert_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', cert_name),
JMESPathCheck('[0].id', cert_id),
JMESPathCheck('[0].properties.thumbprint', cert_thumbprint),
])
self.cmd(
'containerapp env certificate list -n {} -g {} --certificate {}'.format(
env_name, resource_group, cert_id),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', cert_name),
JMESPathCheck('[0].id', cert_id),
JMESPathCheck('[0].properties.thumbprint', cert_thumbprint),
])
self.cmd('containerapp env certificate list -n {} -g {} --thumbprint {}'.format(env_name, resource_group,
cert_thumbprint), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', cert_name),
JMESPathCheck('[0].id', cert_id),
JMESPathCheck('[0].properties.thumbprint', cert_thumbprint),
])
# create a container app
ca_name = self.create_random_name(prefix='containerapp', length=24)
app = self.cmd('containerapp create -g {} -n {} --environment {} --ingress external --target-port 80'.format(
resource_group, ca_name, env_name)).get_output_in_json()
# create an App service domain and update its DNS records
contacts = os.path.join(TEST_DIR, 'domain-contact.json')
zone_name = "{}.com".format(ca_name)
subdomain_1 = "devtest"
txt_name_1 = "asuid.{}".format(subdomain_1)
hostname_1 = "{}.{}".format(subdomain_1, zone_name)
verification_id = app["properties"]["customDomainVerificationId"]
fqdn = app["properties"]["configuration"]["ingress"]["fqdn"]
self.cmd(
"appservice domain create -g {} --hostname {} --contact-info=@'{}' --accept-terms".format(resource_group,
zone_name,
contacts)).get_output_in_json()
self.cmd('network dns record-set txt add-record -g {} -z {} -n {} -v {}'.format(resource_group, zone_name,
txt_name_1,
verification_id)).get_output_in_json()
self.cmd('network dns record-set cname create -g {} -z {} -n {}'.format(resource_group, zone_name,
subdomain_1)).get_output_in_json()
self.cmd('network dns record-set cname set-record -g {} -z {} -n {} -c {}'.format(resource_group, zone_name,
subdomain_1,
fqdn)).get_output_in_json()
        # add the hostname without a certificate binding (the uploaded pfx is a private key certificate)
        self.cmd('containerapp hostname add -g {} -n {} --hostname {}'.format(resource_group, ca_name, hostname_1),
                 checks=[
                     JMESPathCheck('length(@)', 1),
                     JMESPathCheck('[0].name', hostname_1),
                     JMESPathCheck('[0].bindingType', "Disabled"),
                 ])
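        # adding the same hostname a second time should fail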
self.cmd('containerapp hostname add -g {} -n {} --hostname {}'.format(resource_group, ca_name, hostname_1),
expect_failure=True)
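        # only the uploaded pfx should be returned when listing private key certificates (-p)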
self.cmd('containerapp env certificate list -g {} -n {} -c {} -p'.format(resource_group, env_name, cert_name),
checks=[
JMESPathCheck('length(@)', 1),
])
# create a managed certificate
self.cmd('containerapp env certificate create -n {} -g {} --hostname {} -v cname -c {}'.format(env_name,
resource_group,
hostname_1,
cert_name),
checks=[
JMESPathCheck(
'type', "Microsoft.App/managedEnvironments/managedCertificates"),
JMESPathCheck('name', cert_name),
JMESPathCheck('properties.subjectName', hostname_1),
]).get_output_in_json()
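        # one managed certificate is listed with -m; listing by the shared name (-c) returns both certificates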
self.cmd('containerapp env certificate list -g {} -n {} -m'.format(resource_group, env_name), checks=[
JMESPathCheck('length(@)', 1),
])
self.cmd('containerapp env certificate list -g {} -n {} -c {}'.format(resource_group, env_name, cert_name),
checks=[
JMESPathCheck('length(@)', 2),
])
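        # deleting by the shared name is ambiguous and fails; deleting by thumbprint removes the pfx, then deleting by name removes the managed certificate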
self.cmd(
'containerapp env certificate delete -n {} -g {} --certificate {} --yes'.format(env_name, resource_group,
cert_name),
expect_failure=True)
self.cmd(
'containerapp env certificate delete -n {} -g {} --thumbprint {} --yes'.format(env_name, resource_group,
cert_thumbprint))
self.cmd(
'containerapp env certificate delete -n {} -g {} --certificate {} --yes'.format(env_name, resource_group,
cert_name))
self.cmd('containerapp env certificate list -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('length(@)', 0),
])
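        # binding the hostname with -v cname creates a managed certificate for it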
self.cmd('containerapp hostname bind -g {} -n {} --hostname {} --environment {} -v cname'.format(resource_group,
ca_name,
hostname_1,
env_name))
certs = self.cmd('containerapp env certificate list -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('length(@)', 1),
]).get_output_in_json()
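        # the managed certificate cannot be deleted while the hostname binding still uses it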
self.cmd(
'containerapp env certificate delete -n {} -g {} --certificate {} --yes'.format(env_name, resource_group,
certs[0]["name"]),
expect_failure=True)
self.cmd(
'containerapp hostname delete -g {} -n {} --hostname {} --yes'.format(resource_group, ca_name, hostname_1))
self.cmd(
'containerapp env certificate delete -n {} -g {} --certificate {} --yes'.format(env_name, resource_group,
certs[0]["name"]))
self.cmd('containerapp env certificate list -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('length(@)', 0),
])
@ResourceGroupPreparer(location="southcentralus")
def test_containerapp_env_certificate_upload_with_certificate_name(self, resource_group):
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
self.cmd(
'containerapp env create -g {} -n {} --logs-destination none'.format(resource_group, env_name))
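        # a new environment starts with no certificates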
self.cmd('containerapp env certificate list -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('length(@)', 0),
])
        # test that files other than pfx or pem are not supported
txt_file = os.path.join(TEST_DIR, 'cert.txt')
self.cmd('containerapp env certificate upload -g {} -n {} --certificate-file "{}"'.format(
resource_group, env_name, txt_file), expect_failure=True)
# test pfx file with password
pfx_file = os.path.join(TEST_DIR, 'cert.pfx')
pfx_password = 'test12'
cert_pfx_name = self.create_random_name(prefix='cert-pfx', length=24)
cert = self.cmd(
'containerapp env certificate upload -g {} -n {} -c {} --certificate-file "{}" --password {}'.format(
resource_group, env_name, cert_pfx_name, pfx_file, pfx_password), checks=[
JMESPathCheck('properties.provisioningState', "Succeeded"),
JMESPathCheck('name', cert_pfx_name),
JMESPathCheck(
'type', "Microsoft.App/managedEnvironments/certificates"),
]).get_output_in_json()
cert_name = cert["name"]
cert_id = cert["id"]
cert_thumbprint = cert["properties"]["thumbprint"]
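        # the named certificate should be the only one listed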
self.cmd('containerapp env certificate list -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].properties.thumbprint', cert_thumbprint),
JMESPathCheck('[0].name', cert_name),
JMESPathCheck('[0].id', cert_id),
])
# upload without password will fail
self.cmd('containerapp env certificate upload -g {} -n {} --certificate-file "{}"'.format(
resource_group, env_name, pfx_file), expect_failure=True)
self.cmd(
'containerapp env certificate list -n {} -g {} --certificate {}'.format(env_name, resource_group,
cert_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', cert_name),
JMESPathCheck('[0].id', cert_id),
JMESPathCheck('[0].properties.thumbprint', cert_thumbprint),
])
self.cmd(
'containerapp env certificate list -n {} -g {} --certificate {}'.format(env_name, resource_group,
cert_id), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', cert_name),
JMESPathCheck('[0].id', cert_id),
JMESPathCheck('[0].properties.thumbprint', cert_thumbprint),
])
self.cmd(
'containerapp env certificate list -n {} -g {} --thumbprint {}'.format(env_name, resource_group,
cert_thumbprint), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', cert_name),
JMESPathCheck('[0].id', cert_id),
JMESPathCheck('[0].properties.thumbprint', cert_thumbprint),
])
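        # delete succeeds when the thumbprint and the name identify the same certificate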
self.cmd('containerapp env certificate delete -n {} -g {} --thumbprint {} --certificate {} --yes'.format(
env_name, resource_group, cert_thumbprint, cert_name), expect_failure=False)
self.cmd('containerapp env certificate list -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('length(@)', 0),
])
self.cmd('containerapp env delete -g {} -n {} --yes'.format(resource_group,
env_name), expect_failure=False)
|
class ContainerappEnvLocationNotInStageScenarioTest(ScenarioTest):
def __init__(self, *arg, **kwargs):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="australiaeast")
def test_containerapp_env_logs_e2e(self, resource_group):
pass
@AllowLargeResponse(8192)
@live_only()
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_env_custom_domains(self, resource_group):
pass
@AllowLargeResponse(8192)
@live_only()
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_env_update_custom_domains(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
@live_only()
def test_containerapp_env_internal_only_e2e(self, resource_group):
pass
@AllowLargeResponse(8192)
@live_only()
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_certificate_e2e(self, resource_group):
pass
@ResourceGroupPreparer(location="southcentralus")
def test_containerapp_env_certificate_upload_with_certificate_name(self, resource_group):
pass
| 23 | 0 | 68 | 11 | 54 | 3 | 3 | 0.07 | 1 | 1 | 0 | 0 | 7 | 0 | 7 | 7 | 501 | 84 | 395 | 100 | 372 | 26 | 201 | 94 | 193 | 3 | 1 | 1 | 18 |
10,872 |
Azure/azure-cli-extensions
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_commands.py
|
azext_containerapp.tests.latest.test_containerapp_env_commands.ContainerappEnvIdentityTests
|
class ContainerappEnvIdentityTests(ScenarioTest):
def __init__(self, *arg, **kwargs):
super().__init__(*arg, random_config_dir=True, **kwargs)
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_identity_e2e(self, resource_group):
        # MSI is not available in North Central US (Stage); if TEST_LOCATION is "northcentralusstage", use eastus instead
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
user_identity_name1 = self.create_random_name(
prefix='env-msi1', length=24)
user_identity_name2 = self.create_random_name(
prefix='env-msi2', length=24)
user_identity_id1 = self.cmd('identity create -g {} -n {}'.format(
resource_group, user_identity_name1)).get_output_in_json()["id"]
user_identity_id2 = self.cmd('identity create -g {} -n {}'.format(
resource_group, user_identity_name2)).get_output_in_json()["id"]
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
self.cmd('containerapp env create -g {} -n {} --mi-system-assigned --mi-user-assigned {} {} --logs-destination none'.format(
resource_group, env_name, user_identity_id1, user_identity_id2))
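        # poll until the environment leaves the "Waiting" provisioning state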
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env identity show -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id1}"'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id2}"')
])
self.cmd('containerapp env identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name1, resource_group, env_name), checks=[
JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id2}"')
])
self.cmd('containerapp env identity remove --system-assigned --user-assigned {} -g {} -n {}'.format(user_identity_name2, resource_group, env_name), checks=[
JMESPathCheck('type', 'None'),
])
self.cmd('containerapp env identity assign --system-assigned --user-assigned {} {} -g {} -n {}'.format(user_identity_name1, user_identity_name2, resource_group, env_name), checks=[
JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id1}"'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id2}"')
])
self.cmd('containerapp env identity remove --system-assigned -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'UserAssigned'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id1}"'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id2}"')
])
self.cmd('containerapp env identity assign --system-assigned -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
])
self.cmd('containerapp env identity remove --user-assigned {} {} -g {} -n {}'.format(user_identity_name1, user_identity_name2, resource_group, env_name), checks=[
JMESPathCheck('type', 'SystemAssigned'),
])
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_identity_system(self, resource_group):
        # MSI is not available in North Central US (Stage); if TEST_LOCATION is "northcentralusstage", use eastus instead
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
self.cmd('containerapp env create -g {} -n {} --mi-system-assigned --logs-destination none'.format(resource_group, env_name))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env identity show -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'SystemAssigned'),
])
self.cmd('containerapp env identity remove --system-assigned -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'None'),
])
self.cmd('containerapp env identity assign --system-assigned -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'SystemAssigned'),
])
self.cmd('containerapp env identity remove --system-assigned -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'None'),
])
self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[
JMESPathCheck('name', env_name),
])
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_identity_user(self, resource_group):
        # MSI is not available in North Central US (Stage); if TEST_LOCATION is "northcentralusstage", use eastus instead
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
user_identity_name1 = self.create_random_name(
prefix='env-msi1', length=24)
user_identity_name2 = self.create_random_name(
prefix='env-msi2', length=24)
user_identity_id1 = self.cmd('identity create -g {} -n {}'.format(
resource_group, user_identity_name1)).get_output_in_json()["id"]
user_identity_id2 = self.cmd('identity create -g {} -n {}'.format(
resource_group, user_identity_name2)).get_output_in_json()["id"]
env_name = self.create_random_name(
prefix='containerapp-e2e-env', length=24)
self.cmd('containerapp env create -g {} -n {} --mi-user-assigned {} {} --logs-destination none'.format(
resource_group, env_name, user_identity_id1, user_identity_id2))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd('containerapp env identity show -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'UserAssigned'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id1}"'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id2}"')
])
self.cmd('containerapp env identity assign --system-assigned -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
])
self.cmd('containerapp env identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name1, resource_group, env_name), checks=[
JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id2}"')
])
self.cmd('containerapp env identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name2, resource_group, env_name), checks=[
JMESPathCheck('type', 'SystemAssigned'),
])
self.cmd('containerapp env identity remove --system-assigned -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'None'),
])
self.cmd('containerapp env identity show -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'None'),
])
self.cmd('containerapp env identity assign --user-assigned {} -g {} -n {}'.format(user_identity_name1, resource_group, env_name), checks=[
JMESPathCheck('type', 'UserAssigned'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id1}"')
])
self.cmd('containerapp env identity show -g {} -n {}'.format(resource_group, env_name), checks=[
JMESPathCheck('type', 'UserAssigned'),
JMESPathCheckExists(
f'userAssignedIdentities."{user_identity_id1}"')
])
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_env_msi_custom_domains(self, resource_group):
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(
prefix='containerapp-env', length=24)
verification_id = self.cmd(
'az containerapp show-custom-domain-verification-id').output
key_vault_name = self.create_random_name(prefix='capp-kv-', length=24)
cert_name = self.create_random_name(prefix='akv-cert-', length=24)
signInUser = self.cmd("ad signed-in-user show").get_output_in_json()
# create azure keyvault and assign role
kv = self.cmd(
f"keyvault create -g {resource_group} -n {key_vault_name}").get_output_in_json()
roleAssignmentName1 = self.create_guid()
self.cmd(
f'role assignment create --role "Key Vault Administrator" --assignee {signInUser["id"]} --scope {kv["id"]} --name {roleAssignmentName1}')
        # create an App Service domain and add its TXT verification record
contacts = os.path.join(TEST_DIR, 'domain-contact.json')
zone_name = "{}.com".format(env_name)
subdomain_1 = "devtest"
txt_name_1 = "asuid.{}".format(subdomain_1)
hostname_1 = "{}.{}".format(subdomain_1, zone_name)
self.cmd("appservice domain create -g {} --hostname {} --contact-info=@'{}' --accept-terms".format(
resource_group, zone_name, contacts)).get_output_in_json()
self.cmd('network dns record-set txt add-record -g {} -z {} -n {} -v {}'.format(
resource_group, zone_name, txt_name_1, verification_id)).get_output_in_json()
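        # build a certificate policy for a wildcard PEM certificate covering the custom domain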
defaultPolicy = self.cmd(
"keyvault certificate get-default-policy").get_output_in_json()
defaultPolicy["x509CertificateProperties"]["subject"] = f"CN=*.{hostname_1}"
defaultPolicy["secretProperties"]["contentType"] = "application/x-pem-file"
temp = tempfile.NamedTemporaryFile(
prefix='capp_', suffix='_tmp', mode="w+", delete=False)
temp.write(json.dumps(defaultPolicy, default=lambda o: dict(
(key, value) for key, value in o.__dict__.items() if value), allow_nan=False))
temp.close()
time.sleep(5)
        # create a self-signed certificate in the keyvault
cert = self.cmd('keyvault certificate create --vault-name {} -n {} -p @"{}"'.format(
key_vault_name, cert_name, temp.name)).get_output_in_json()
akv_secret_url = cert["target"].replace("certificates", "secrets")
user_identity_name1 = self.create_random_name(
prefix='env-msi1', length=24)
identity_json = self.cmd('identity create -g {} -n {}'.format(
resource_group, user_identity_name1)).get_output_in_json()
user_identity_id1 = identity_json["id"]
principal_id1 = identity_json["principalId"]
# assign secret permissions to the user assigned identity
time.sleep(10)
roleAssignmentName2 = self.create_guid()
self.cmd(
f'role assignment create --role "Key Vault Secrets User" --assignee-object-id {principal_id1} --assignee-principal-type ServicePrincipal --scope {kv["id"]} --name {roleAssignmentName2}')
# create an environment with custom domain and user assigned identity
self.cmd('containerapp env create -g {} -n {} --mi-user-assigned {} --logs-destination none --dns-suffix {} --certificate-identity {} --certificate-akv-url {}'.format(
resource_group, env_name, user_identity_id1, hostname_1, user_identity_id1, akv_secret_url))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
self.cmd(f'containerapp env show -n {env_name} -g {resource_group}', checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.customDomainConfiguration.dnsSuffix', hostname_1),
JMESPathCheck(
'properties.customDomainConfiguration.certificateKeyVaultProperties.identity', user_identity_id1),
JMESPathCheck(
'properties.customDomainConfiguration.certificateKeyVaultProperties.keyVaultUrl', akv_secret_url),
])
# update env with custom domain using file and password
tmpFile = os.path.join(tempfile.gettempdir(),
"{}.pem".format(env_name))
self.cmd(
f'keyvault secret download --vault-name {key_vault_name} -n {cert_name} -f "{tmpFile}"')
self.cmd('containerapp env update -g {} -n {} --certificate-file "{}"'.format(
resource_group, env_name, tmpFile))
self.cmd(f'containerapp env show -n {env_name} -g {resource_group}', checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.customDomainConfiguration.dnsSuffix', hostname_1),
JMESPathCheck(
'properties.customDomainConfiguration.certificateKeyVaultProperties', None),
])
# update env with custom domain using msi
self.cmd('containerapp env update -g {} -n {} --certificate-identity {} --certificate-akv-url {}'.format(
resource_group, env_name, user_identity_id1, akv_secret_url))
self.cmd(f'containerapp env show -n {env_name} -g {resource_group}', checks=[
JMESPathCheck('name', env_name),
JMESPathCheck(
'properties.customDomainConfiguration.dnsSuffix', hostname_1),
JMESPathCheck(
'properties.customDomainConfiguration.certificateKeyVaultProperties.identity', user_identity_id1),
JMESPathCheck(
'properties.customDomainConfiguration.certificateKeyVaultProperties.keyVaultUrl', akv_secret_url),
])
# remove temp file
os.remove(temp.name)
os.remove(tmpFile)
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_env_msi_certificate(self, resource_group):
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(prefix='capp-env', length=24)
key_vault_name = self.create_random_name(prefix='capp-kv-', length=24)
cert_name = self.create_random_name(prefix='akv-cert-', length=24)
signInUser = self.cmd("ad signed-in-user show").get_output_in_json()
# create azure keyvault and assign role
kv = self.cmd(
f"keyvault create -g {resource_group} -n {key_vault_name}").get_output_in_json()
roleAssignmentName1 = self.create_guid()
self.cmd(
f'role assignment create --role "Key Vault Administrator" --assignee {signInUser["id"]} --scope {kv["id"]} --name {roleAssignmentName1}')
defaultPolicy = self.cmd(
"keyvault certificate get-default-policy").get_output_in_json()
defaultPolicy["x509CertificateProperties"]["subject"] = f"CN=*.contoso.com"
defaultPolicy["secretProperties"]["contentType"] = "application/x-pem-file"
temp = tempfile.NamedTemporaryFile(
prefix='capp_', suffix='_tmp', mode="w+", delete=False)
temp.write(json.dumps(defaultPolicy, default=lambda o: dict(
(key, value) for key, value in o.__dict__.items() if value), allow_nan=False))
temp.close()
time.sleep(5)
        # create a self-signed certificate in the keyvault
cert = self.cmd('keyvault certificate create --vault-name {} -n {} -p @"{}"'.format(
key_vault_name, cert_name, temp.name)).get_output_in_json()
akv_secret_url = cert["target"].replace("certificates", "secrets")
        # create an environment with a system assigned identity
self.cmd('containerapp env create -g {} -n {} --mi-system-assigned --logs-destination none'.format(
resource_group, env_name))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
# assign secret permissions to the system assigned identity
principal_id = containerapp_env["identity"]["principalId"]
roleAssignmentName2 = self.create_guid()
self.cmd(
f'role assignment create --role "Key Vault Secrets User" --assignee {principal_id} --scope {kv["id"]} --name {roleAssignmentName2}')
containerapp_cert_name = self.create_random_name(
prefix='containerapp-cert', length=24)
cert = self.cmd(f"containerapp env certificate upload -g {resource_group} -n {env_name} -c {containerapp_cert_name} --akv-url {akv_secret_url}", checks=[
JMESPathCheck(
'type', "Microsoft.App/managedEnvironments/certificates"),
]).get_output_in_json()
containerapp_cert_id = cert["id"]
containerapp_cert_thumbprint = cert["properties"]["thumbprint"]
containerapp_cert_location = cert["location"]
self.cmd(
'containerapp env certificate list -n {} -g {} -l "{}"'.format(
env_name, resource_group, containerapp_cert_location),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck(
'[0].properties.certificateKeyVaultProperties.keyVaultUrl', akv_secret_url),
JMESPathCheck(
'[0].properties.certificateKeyVaultProperties.identity', "system"),
JMESPathCheck('[0].properties.thumbprint',
containerapp_cert_thumbprint),
JMESPathCheck('[0].name', containerapp_cert_name),
JMESPathCheck('[0].id', containerapp_cert_id),
])
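        # uploading from a local file should leave certificateKeyVaultProperties unset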
tmpFile = os.path.join(tempfile.gettempdir(),
"{}.pem".format(env_name))
self.cmd(
f'keyvault secret download --vault-name {key_vault_name} -n {cert_name} -f "{tmpFile}"')
containerapp_cert_name = self.create_random_name(
prefix='containerapp-cert', length=24)
self.cmd('containerapp env certificate upload -g {} -n {} -c {} --certificate-file "{}"'.format(
resource_group, env_name, containerapp_cert_name, tmpFile), checks=[
JMESPathCheck(
'type', "Microsoft.App/managedEnvironments/certificates"),
JMESPathCheck(
'properties.certificateKeyVaultProperties', None),
])
containerapp_cert_name = self.create_random_name(
prefix='containerapp-cert', length=24)
self.cmd(f"containerapp env certificate upload -g {resource_group} -n {env_name} -c {containerapp_cert_name} --akv-url {akv_secret_url}", checks=[
JMESPathCheck(
'type', "Microsoft.App/managedEnvironments/certificates"),
JMESPathCheck(
'properties.certificateKeyVaultProperties.keyVaultUrl', akv_secret_url),
JMESPathCheck(
'properties.certificateKeyVaultProperties.identity', "system"),
])
# remove temp file
os.remove(temp.name)
os.remove(tmpFile)
@AllowLargeResponse(8192)
@live_only()
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_env_msi_certificate_random_name(self, resource_group):
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env_name = self.create_random_name(prefix='capp-env', length=24)
key_vault_name = self.create_random_name(prefix='capp-kv-', length=24)
cert_name = self.create_random_name(prefix='akv-cert-', length=24)
signInUser = self.cmd("ad signed-in-user show").get_output_in_json()
# create azure keyvault and assign role
kv = self.cmd(
f"keyvault create -g {resource_group} -n {key_vault_name}").get_output_in_json()
roleAssignmentName1 = self.create_guid()
self.cmd(
f'role assignment create --role "Key Vault Administrator" --assignee {signInUser["id"]} --scope {kv["id"]} --name {roleAssignmentName1}')
defaultPolicy = self.cmd(
"keyvault certificate get-default-policy").get_output_in_json()
defaultPolicy["x509CertificateProperties"]["subject"] = f"CN=*.contoso.com"
defaultPolicy["secretProperties"]["contentType"] = "application/x-pem-file"
temp = tempfile.NamedTemporaryFile(
prefix='capp_', suffix='_tmp', mode="w+", delete=False)
temp.write(json.dumps(defaultPolicy, default=lambda o: dict(
(key, value) for key, value in o.__dict__.items() if value), allow_nan=False))
temp.close()
time.sleep(5)
        # create a self-signed certificate in the keyvault
cert = self.cmd('keyvault certificate create --vault-name {} -n {} -p @"{}"'.format(
key_vault_name, cert_name, temp.name)).get_output_in_json()
akv_secret_url = cert["target"].replace("certificates", "secrets")
user_identity_name = self.create_random_name(
prefix='env-msi', length=24)
identity_json = self.cmd('identity create -g {} -n {}'.format(
resource_group, user_identity_name)).get_output_in_json()
user_identity_id = identity_json["id"]
principal_id = identity_json["principalId"]
# assign secret permissions to the user assigned identity
time.sleep(10)
roleAssignmentName2 = self.create_guid()
self.cmd(
f'role assignment create --role "Key Vault Secrets User" --assignee-object-id {principal_id} --assignee-principal-type ServicePrincipal --scope {kv["id"]} --name {roleAssignmentName2}')
        # create an environment with a user assigned identity
self.cmd('containerapp env create -g {} -n {} --mi-user-assigned {} --logs-destination none'.format(
resource_group, env_name, user_identity_id))
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
time.sleep(5)
containerapp_env = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
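        # upload by key vault URL with the user assigned identity and no -c; the service generates the certificate name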
cert = self.cmd(f"containerapp env certificate upload -g {resource_group} -n {env_name} --akv-url {akv_secret_url} --identity {user_identity_id}", checks=[
JMESPathCheck(
'type', "Microsoft.App/managedEnvironments/certificates"),
]).get_output_in_json()
containerapp_cert_name = cert["name"]
containerapp_cert_id = cert["id"]
containerapp_cert_thumbprint = cert["properties"]["thumbprint"]
containerapp_cert_location = cert["location"]
self.cmd(
'containerapp env certificate list -n {} -g {} -l "{}"'.format(
env_name, resource_group, containerapp_cert_location),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck(
'[0].properties.certificateKeyVaultProperties.keyVaultUrl', akv_secret_url),
JMESPathCheck(
'[0].properties.certificateKeyVaultProperties.identity', user_identity_id),
JMESPathCheck('[0].properties.thumbprint',
containerapp_cert_thumbprint),
JMESPathCheck('[0].name', containerapp_cert_name),
JMESPathCheck('[0].id', containerapp_cert_id),
])
|
class ContainerappEnvIdentityTests(ScenarioTest):
def __init__(self, *arg, **kwargs):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_identity_e2e(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_identity_system(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="northeurope")
def test_containerapp_env_identity_user(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_env_msi_custom_domains(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_env_msi_certificate(self, resource_group):
pass
@AllowLargeResponse(8192)
@live_only()
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_env_msi_certificate_random_name(self, resource_group):
pass
| 21 | 0 | 54 | 9 | 42 | 3 | 3 | 0.06 | 1 | 2 | 0 | 0 | 7 | 0 | 7 | 7 | 396 | 66 | 310 | 95 | 289 | 20 | 196 | 89 | 188 | 3 | 1 | 1 | 19 |
10,873 |
Azure/azure-cli-extensions
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_dotnet_component.py
|
azext_containerapp.tests.latest.test_containerapp_dotnet_component.ContainerappDotNetComponentTests
|
class ContainerappDotNetComponentTests(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westus2")
def test_containerapp_dotnet_component(self, resource_group):
env_name = self.create_random_name(prefix='aca-dotnet-env', length=24)
dotnet_component_name = self.create_random_name(
prefix='dotnet-aca', length=24)
location = "westus2"
env_create_cmd = f'containerapp env create -g {resource_group} -n {env_name} --location {location} --logs-destination none --enable-workload-profiles'
self.cmd(env_create_cmd)
# List DotNet Components
dotnet_component_list = self.cmd(
"containerapp env dotnet-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertTrue(len(dotnet_component_list) == 0)
# Create DotNet Component
self.cmd('containerapp env dotnet-component create -g {} -n {} --environment {}'.format(
resource_group, dotnet_component_name, env_name))
# Show DotNet Component
self.cmd('containerapp env dotnet-component show -g {} -n {} --environment {}'.format(resource_group, dotnet_component_name, env_name), checks=[
JMESPathCheck('name', dotnet_component_name),
JMESPathCheck('properties.componentType', "AspireDashboard"),
JMESPathCheck('properties.provisioningState', "Succeeded")
])
# List DotNet Components
dotnet_component_list = self.cmd(
"containerapp env dotnet-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertTrue(len(dotnet_component_list) == 1)
# Delete DotNet Component
self.cmd('containerapp env dotnet-component delete -g {} -n {} --environment {} --yes'.format(
resource_group, dotnet_component_name, env_name), expect_failure=False)
# List DotNet Components
dotnet_component_list = self.cmd(
"containerapp env dotnet-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertTrue(len(dotnet_component_list) == 0)
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westus2")
def test_containerapp_dotnet_component_create_with_unsupported_component_type(self, resource_group):
env_name = self.create_random_name(prefix='aca-dotnet-env', length=24)
dotnet_component_name = self.create_random_name(
prefix='dotnet-aca', length=24)
location = "westus2"
env_create_cmd = f'containerapp env create -g {resource_group} -n {env_name} --location {location} --logs-destination none --enable-workload-profiles'
self.cmd(env_create_cmd)
# List DotNet Components
dotnet_component_list = self.cmd(
"containerapp env dotnet-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertTrue(len(dotnet_component_list) == 0)
# Creating DotNet Component with unsupported component type should fail
with self.assertRaises(SystemExit) as exec_info:
self.cmd('containerapp env dotnet-component create -g {} -n {} --environment {} --type {}'.format(
resource_group, dotnet_component_name, env_name, "test-component-type"), expect_failure=True)
self.assertEqual(exec_info.exception.code, 2)
# List DotNet Components
dotnet_component_list = self.cmd(
"containerapp env dotnet-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertTrue(len(dotnet_component_list) == 0)
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westus2")
def test_containerapp_dotnet_component_with_component_type(self, resource_group):
env_name = self.create_random_name(prefix='aca-dotnet-env', length=24)
dotnet_component_name = self.create_random_name(
prefix='dotnet-aca', length=24)
location = "westus2"
env_create_cmd = f'containerapp env create -g {resource_group} -n {env_name} --location {location} --logs-destination none --enable-workload-profiles'
self.cmd(env_create_cmd)
# List DotNet Components
dotnet_component_list = self.cmd(
"containerapp env dotnet-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertTrue(len(dotnet_component_list) == 0)
# Create DotNet Component
self.cmd('containerapp env dotnet-component create -g {} -n {} --environment {} --type {}'.format(
resource_group, dotnet_component_name, env_name, "AspireDashboard"))
# Show DotNet Component
self.cmd('containerapp env dotnet-component show -g {} -n {} --environment {}'.format(resource_group, dotnet_component_name, env_name), checks=[
JMESPathCheck('name', dotnet_component_name),
JMESPathCheck('properties.componentType', "AspireDashboard"),
JMESPathCheck('properties.provisioningState', "Succeeded")
])
# List DotNet Components
dotnet_component_list = self.cmd(
"containerapp env dotnet-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertTrue(len(dotnet_component_list) == 1)
# Delete DotNet Component
self.cmd('containerapp env dotnet-component delete -g {} -n {} --environment {} --yes'.format(
resource_group, dotnet_component_name, env_name), expect_failure=False)
# List DotNet Components
dotnet_component_list = self.cmd(
"containerapp env dotnet-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json()
self.assertTrue(len(dotnet_component_list) == 0)
|
class ContainerappDotNetComponentTests(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westus2")
def test_containerapp_dotnet_component(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westus2")
def test_containerapp_dotnet_component_create_with_unsupported_component_type(self, resource_group):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westus2")
def test_containerapp_dotnet_component_with_component_type(self, resource_group):
pass
| 10 | 0 | 28 | 6 | 17 | 5 | 1 | 0.26 | 1 | 1 | 0 | 0 | 3 | 0 | 3 | 3 | 93 | 20 | 58 | 23 | 48 | 15 | 44 | 19 | 40 | 1 | 1 | 1 | 3 |
10,874 |
Azure/azure-cli-extensions
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_dapr_resiliency.py
|
azext_containerapp.tests.latest.test_containerapp_dapr_resiliency.DaprComponentResiliencyTests
|
class DaprComponentResiliencyTests(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_dapr_component_resiliency(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='containerapp-env', length=24)
dapr_comp_name = self.create_random_name(prefix='daprcomp', length=24)
resil_name = self.create_random_name(prefix='resil', length=24)
bad_rg = "bad-rg"
bad_comp = "bad-comp"
bad_env = "bad-env"
resil_policy_count = 1
create_containerapp_env(self, env_name, resource_group)
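        # write a minimal blob storage state store component to a temporary yaml file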
file_ref, dapr_file = tempfile.mkstemp(suffix=".yml")
dapr_yaml = """
name: statestore
componentType: state.azure.blobstorage
version: v1
metadata:
- name: accountName
secretRef: storage-account-name
secrets:
- name: storage-account-name
value: storage-account-name
"""
daprloaded = yaml.safe_load(dapr_yaml)
with open(dapr_file, 'w') as outfile:
yaml.dump(daprloaded, outfile, default_flow_style=False)
self.cmd('containerapp env dapr-component set -n {} -g {} --dapr-component-name {} --yaml {}'.format(env_name, resource_group, dapr_comp_name, dapr_file.replace(os.sep, os.sep + os.sep)), checks=[
JMESPathCheck('name', dapr_comp_name),
])
os.close(file_ref)
# Incorrect resource group (create)
self.cmd('containerapp env dapr-component resiliency create -n {} --dapr-component-name {} --environment {} -g {} --in-timeout 15 --in-http-retries 5'.format(
resil_name, dapr_comp_name, env_name, bad_rg), expect_failure=True)
# Incorrect dapr component name (create)
self.cmd('containerapp env dapr-component resiliency create -n {} --dapr-component-name {} --environment {} -g {} --in-timeout 15 --in-http-retries 5'.format(
resil_name, bad_comp, env_name, resource_group), expect_failure=True)
# Incorrect environment name (create)
self.cmd('containerapp env dapr-component resiliency create -n {} --dapr-component-name {} --environment {} -g {} --in-timeout 15 --in-http-retries 5'.format(
resil_name, dapr_comp_name, bad_env, resource_group), expect_failure=True)
        # Create should fail when --in-cb-timeout is passed without the other inbound circuit breaker flags
self.cmd('containerapp env dapr-component resiliency create -n {} --dapr-component-name {} --environment {} -g {} --in-timeout 15 --in-http-retries 5 --in-cb-timeout 1'.format(
resil_name, dapr_comp_name, env_name, resource_group), expect_failure=True)
        # Create should fail when --out-cb-interval is passed without the other outbound circuit breaker flags
self.cmd('containerapp env dapr-component resiliency create -n {} --dapr-component-name {} --environment {} -g {} --in-timeout 15 --in-http-retries 5 --out-cb-interval 1'.format(
resil_name, dapr_comp_name, env_name, resource_group), expect_failure=True)
# Create dapr component resiliency using flags
self.cmd('containerapp env dapr-component resiliency create -n {} --dapr-component-name {} --environment {} -g {} --in-timeout 15 --in-http-retries 5 --in-cb-timeout 5 --in-cb-sequential-err 3'.format(resil_name, dapr_comp_name, env_name, resource_group), checks=[
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.maxRetries", "5"),
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.retryBackOff.initialDelayInMilliseconds", "1000"),
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.retryBackOff.maxIntervalInMilliseconds", "10000"),
JMESPathCheck(
"properties.inboundPolicy.timeoutPolicy.responseTimeoutInSeconds", "15"),
JMESPathCheck(
"properties.inboundPolicy.circuitBreakerPolicy.consecutiveErrors", "3"),
JMESPathCheck(
"properties.inboundPolicy.circuitBreakerPolicy.timeoutInSeconds", "5"),
])
# Show dapr component resiliency
self.cmd('containerapp env dapr-component resiliency show -n {} --dapr-component-name {} --environment {} -g {}'.format(resil_name, dapr_comp_name, env_name, resource_group), checks=[
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.maxRetries", "5"),
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.retryBackOff.initialDelayInMilliseconds", "1000"),
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.retryBackOff.maxIntervalInMilliseconds", "10000"),
JMESPathCheck(
"properties.inboundPolicy.timeoutPolicy.responseTimeoutInSeconds", "15"),
JMESPathCheck(
"properties.inboundPolicy.circuitBreakerPolicy.consecutiveErrors", "3"),
JMESPathCheck(
"properties.inboundPolicy.circuitBreakerPolicy.timeoutInSeconds", "5"),
])
# Update dapr component resiliency using flags
self.cmd('containerapp env dapr-component resiliency update -n {} --dapr-component-name {} --environment {} -g {} --out-timeout 45'.format(
resil_name, dapr_comp_name, env_name, resource_group))
self.cmd('containerapp env dapr-component resiliency show -n {} --dapr-component-name {} --environment {} -g {}'.format(resil_name, dapr_comp_name, env_name, resource_group), checks=[
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.maxRetries", "5"),
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.retryBackOff.initialDelayInMilliseconds", "1000"),
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.retryBackOff.maxIntervalInMilliseconds", "10000"),
JMESPathCheck(
"properties.inboundPolicy.timeoutPolicy.responseTimeoutInSeconds", "15"),
JMESPathCheck(
"properties.outboundPolicy.timeoutPolicy.responseTimeoutInSeconds", "45"),
JMESPathCheck(
"properties.inboundPolicy.circuitBreakerPolicy.consecutiveErrors", "3"),
JMESPathCheck(
"properties.inboundPolicy.circuitBreakerPolicy.timeoutInSeconds", "5"),
])
# Incorrect resource group (update)
self.cmd('containerapp env dapr-component resiliency update -n {} --dapr-component-name {} --environment {} -g {} --out-timeout 45'.format(
resil_name, dapr_comp_name, env_name, bad_rg), expect_failure=True)
# Incorrect dapr component name (update)
self.cmd('containerapp env dapr-component resiliency update -n {} --dapr-component-name {} --environment {} -g {} --out-timeout 45'.format(
resil_name, bad_comp, env_name, resource_group), expect_failure=True)
# Incorrect environment name (update)
self.cmd('containerapp env dapr-component resiliency update -n {} --dapr-component-name {} --environment {} -g {} --out-timeout 45'.format(
resil_name, dapr_comp_name, bad_env, resource_group), expect_failure=True)
# List dapr component resiliency
self.cmd('containerapp env dapr-component resiliency list --dapr-component-name {} --environment {} -g {}'.format(dapr_comp_name, env_name, resource_group), checks=[
JMESPathCheck('length(@)', resil_policy_count),
JMESPathCheck(
"[0].properties.inboundPolicy.httpRetryPolicy.maxRetries", "5"),
JMESPathCheck(
"[0].properties.inboundPolicy.httpRetryPolicy.retryBackOff.initialDelayInMilliseconds", "1000"),
JMESPathCheck(
"[0].properties.inboundPolicy.httpRetryPolicy.retryBackOff.maxIntervalInMilliseconds", "10000"),
JMESPathCheck(
"[0].properties.inboundPolicy.timeoutPolicy.responseTimeoutInSeconds", "15"),
JMESPathCheck(
"[0].properties.outboundPolicy.timeoutPolicy.responseTimeoutInSeconds", "45"),
JMESPathCheck(
"[0].properties.inboundPolicy.circuitBreakerPolicy.consecutiveErrors", "3"),
JMESPathCheck(
"[0].properties.inboundPolicy.circuitBreakerPolicy.timeoutInSeconds", "5"),
])
# Incorrect resource group (list)
self.cmd('containerapp env dapr-component resiliency list --dapr-component-name {} --environment {} -g {}'.format(
dapr_comp_name, env_name, bad_rg), expect_failure=True)
# Incorrect dapr component name (list)
self.cmd('containerapp env dapr-component resiliency list --dapr-component-name {} --environment {} -g {}'.format(
bad_comp, env_name, resource_group), expect_failure=True)
# Incorrect environment name (list)
self.cmd('containerapp env dapr-component resiliency list --dapr-component-name {} --environment {} -g {}'.format(
dapr_comp_name, bad_env, resource_group), expect_failure=True)
# Delete dapr component resiliency
self.cmd('containerapp env dapr-component resiliency delete -n {} --dapr-component-name {} --environment {} -g {} --yes'.format(
resil_name, dapr_comp_name, env_name, resource_group), expect_failure=False)
# List dapr component resiliency after deletion
self.cmd('containerapp env dapr-component resiliency list --dapr-component-name {} --environment {} -g {}'.format(dapr_comp_name, env_name, resource_group), checks=[
JMESPathCheck('length(@)', 0),
])
# Show dapr component resiliency after deletion
self.cmd('containerapp env dapr-component resiliency show -n {} --dapr-component-name {} --environment {} -g {}'.format(
resil_name, dapr_comp_name, env_name, resource_group), expect_failure=True)
# Create dapr component resiliency using yaml
resil_yaml_text = f"""
outboundPolicy:
httpRetryPolicy:
maxRetries: 16
retryBackOff:
initialDelayInMilliseconds: 10
maxIntervalInMilliseconds: 100
timeoutPolicy:
responseTimeoutInSeconds: 17
circuitBreakerPolicy:
consecutiveErrors: 5
timeoutInSeconds: 15
intervalInSeconds: 60
inboundPolicy:
httpRetryPolicy:
maxRetries: 15
retryBackOff:
initialDelayInMilliseconds: 9
maxIntervalInMilliseconds: 99
circuitBreakerPolicy:
consecutiveErrors: 3
timeoutInSeconds: 10
"""
resil_file_name = f"{self._testMethodName}_daprcomp.yml"
write_test_file(resil_file_name, resil_yaml_text)
self.cmd(f'containerapp env dapr-component resiliency create -n {resil_name} --dapr-component-name {dapr_comp_name} -g {resource_group} --environment {env_name} --yaml {resil_file_name}', checks=[
JMESPathCheck(
"properties.outboundPolicy.httpRetryPolicy.maxRetries", "16"),
JMESPathCheck(
"properties.outboundPolicy.httpRetryPolicy.retryBackOff.initialDelayInMilliseconds", "10"),
JMESPathCheck(
"properties.outboundPolicy.httpRetryPolicy.retryBackOff.maxIntervalInMilliseconds", "100"),
JMESPathCheck(
"properties.outboundPolicy.timeoutPolicy.responseTimeoutInSeconds", "17"),
JMESPathCheck(
"properties.outboundPolicy.circuitBreakerPolicy.consecutiveErrors", "5"),
JMESPathCheck(
"properties.outboundPolicy.circuitBreakerPolicy.timeoutInSeconds", "15"),
JMESPathCheck(
"properties.outboundPolicy.circuitBreakerPolicy.intervalInSeconds", "60"),
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.maxRetries", "15"),
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.retryBackOff.initialDelayInMilliseconds", "9"),
JMESPathCheck(
"properties.inboundPolicy.httpRetryPolicy.retryBackOff.maxIntervalInMilliseconds", "99"),
JMESPathCheck(
"properties.inboundPolicy.circuitBreakerPolicy.consecutiveErrors", "3"),
JMESPathCheck(
"properties.inboundPolicy.circuitBreakerPolicy.timeoutInSeconds", "10"),
])
clean_up_test_file(resil_file_name)
# Update dapr component resiliency using yaml
resil_yaml_text = f"""
outboundPolicy:
httpRetryPolicy:
maxRetries: 25
retryBackOff:
initialDelayInMilliseconds: 25
maxIntervalInMilliseconds: 250
"""
resil_file_name = f"{self._testMethodName}_daprcomp.yml"
write_test_file(resil_file_name, resil_yaml_text)
self.cmd(f'containerapp env dapr-component resiliency update -n {resil_name} --dapr-component-name {dapr_comp_name} -g {resource_group} --environment {env_name} --yaml {resil_file_name}', checks=[
JMESPathCheck(
"properties.outboundPolicy.httpRetryPolicy.maxRetries", "25"),
JMESPathCheck(
"properties.outboundPolicy.httpRetryPolicy.retryBackOff.initialDelayInMilliseconds", "25"),
JMESPathCheck(
"properties.outboundPolicy.httpRetryPolicy.retryBackOff.maxIntervalInMilliseconds", "250"),
])
clean_up_test_file(resil_file_name)
|
class DaprComponentResiliencyTests(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_dapr_component_resiliency(self, resource_group):
pass
| 4 | 0 | 189 | 32 | 137 | 20 | 1 | 0.14 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 192 | 32 | 140 | 16 | 136 | 20 | 47 | 14 | 45 | 1 | 1 | 1 | 1 |
10,875 |
Azure/azure-cli-extensions
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Azure_azure-cli-extensions/src/containerapp/azext_containerapp/tests/latest/test_containerapp_dapr_resiliency.py
|
azext_containerapp.tests.latest.test_containerapp_dapr_resiliency.ContainerappResiliencyTests
|
class ContainerappResiliencyTests(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_resiliency(self, resource_group):
self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
env_name = self.create_random_name(
prefix='containerapp-env', length=24)
ca_name = self.create_random_name(prefix='containerapp', length=24)
resil_name = self.create_random_name(prefix='resil', length=24)
bad_resil = "bad-resil"
bad_rg = "bad-rg"
bad_capp = "bad-capp"
resil_policy_count = 1
create_containerapp_env(self, env_name, resource_group)
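        # create a container app and verify it provisioned successfully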
self.cmd(
'containerapp create -g {} -n {} --environment {}'.format(resource_group, ca_name, env_name))
self.cmd(f'containerapp show -g {resource_group} -n {ca_name}', checks=[
JMESPathCheck("properties.provisioningState", "Succeeded")])
# Incorrect resource group (create)
self.cmd('containerapp resiliency create -g {} -n {} --container-app-name {} --cb-interval 15 --cb-sequential-errors 5 --cb-max-ejection 60'.format(
bad_rg, resil_name, ca_name), expect_failure=True)
# Incorrect capp name (create)
self.cmd('containerapp resiliency create -g {} -n {} --container-app-name {} --cb-interval 15 --cb-sequential-errors 5 --cb-max-ejection 60'.format(
resource_group, resil_name, bad_capp), expect_failure=True)
# Create app resiliency using flags
self.cmd('containerapp resiliency create -g {} -n {} --container-app-name {} --cb-interval 15 --cb-sequential-errors 5 --cb-max-ejection 60'.format(resource_group, resil_name, ca_name))
# Show app resiliency
self.cmd('containerapp resiliency show -g {} -n {} --container-app-name {}'.format(resource_group, resil_name, ca_name), checks=[
JMESPathCheck(
"properties.circuitBreakerPolicy.consecutiveErrors", "5"),
JMESPathCheck(
"properties.circuitBreakerPolicy.intervalInSeconds", "15"),
JMESPathCheck(
"properties.circuitBreakerPolicy.maxEjectionPercent", "60"),
])
# Update app resiliency using flags
self.cmd('containerapp resiliency update -g {} -n {} --container-app-name {} --timeout 45 --timeout-connect 5'.format(
resource_group, resil_name, ca_name))
# Incorrect resource group (update)
self.cmd('containerapp resiliency update -g {} -n {} --container-app-name {} --cb-interval 15 --cb-sequential-errors 5 --cb-max-ejection 60'.format(
bad_rg, resil_name, ca_name), expect_failure=True)
# Incorrect capp name (update)
self.cmd('containerapp resiliency update -g {} -n {} --container-app-name {} --cb-interval 15 --cb-sequential-errors 5 --cb-max-ejection 60'.format(
resource_group, resil_name, bad_capp), expect_failure=True)
self.cmd('containerapp resiliency show -g {} -n {} --container-app-name {}'.format(resource_group, resil_name, ca_name), checks=[
JMESPathCheck(
"properties.circuitBreakerPolicy.consecutiveErrors", "5"),
JMESPathCheck(
"properties.circuitBreakerPolicy.intervalInSeconds", "15"),
JMESPathCheck(
"properties.circuitBreakerPolicy.maxEjectionPercent", "60"),
JMESPathCheck(
"properties.timeoutPolicy.responseTimeoutInSeconds", "45"),
JMESPathCheck(
"properties.timeoutPolicy.connectionTimeoutInSeconds", "5")
])
# Incorrect resource group (show)
self.cmd('containerapp resiliency show -g {} -n {} --container-app-name {}'.format(
bad_rg, resil_name, ca_name), expect_failure=True)
# Incorrect capp name (show)
self.cmd('containerapp resiliency show -g {} -n {} --container-app-name {}'.format(
resource_group, resil_name, bad_capp), expect_failure=True)
# Incorrect resil name (show)
self.cmd('containerapp resiliency show -g {} -n {} --container-app-name {}'.format(
resource_group, bad_resil, ca_name), expect_failure=True)
# List app resiliency
self.cmd('containerapp resiliency list -g {} --container-app-name {}'.format(resource_group, ca_name), checks=[
JMESPathCheck('length(@)', resil_policy_count),
JMESPathCheck(
"[0].properties.circuitBreakerPolicy.consecutiveErrors", "5"),
JMESPathCheck(
"[0].properties.circuitBreakerPolicy.intervalInSeconds", "15"),
JMESPathCheck(
"[0].properties.circuitBreakerPolicy.maxEjectionPercent", "60"),
JMESPathCheck(
"[0].properties.timeoutPolicy.responseTimeoutInSeconds", "45"),
JMESPathCheck(
"[0].properties.timeoutPolicy.connectionTimeoutInSeconds", "5")
])
# Incorrect resource group (list)
self.cmd('containerapp resiliency list -g {} --container-app-name {}'.format(
bad_rg, ca_name), expect_failure=True)
# Incorrect capp name (list)
self.cmd('containerapp resiliency list -g {} --container-app-name {}'.format(
resource_group, bad_capp), expect_failure=True)
# Delete app resiliency
self.cmd('containerapp resiliency delete -g {} -n {} --container-app-name {} --yes'.format(
resource_group, resil_name, ca_name), expect_failure=False)
# List app resiliency after deletion
self.cmd('containerapp resiliency list -g {} --container-app-name {}'.format(resource_group, ca_name), checks=[
JMESPathCheck('length(@)', 0),
])
# Show app resiliency after deletion
self.cmd('containerapp resiliency show -g {} -n {} --container-app-name {}'.format(
resource_group, resil_name, ca_name), expect_failure=True)
# Create app resiliency using yaml
resil_yaml_text = f"""
timeoutPolicy:
responseTimeoutInSeconds: 25
connectionTimeoutInSeconds: 15
httpRetryPolicy:
maxRetries: 15
retryBackOff:
initialDelayInMilliseconds: 5000
maxIntervalInMilliseconds: 50000
matches:
headers:
- header: X-Content-Type
match:
prefixMatch: GOATS
httpStatusCodes:
- 502
- 503
errors:
- 5xx
- connect-failure
- reset
- retriable-headers
- retriable-status-codes
tcpRetryPolicy:
maxConnectAttempts: 8
circuitBreakerPolicy:
consecutiveErrors: 15
intervalInSeconds: 15
maxEjectionPercent: 60
tcpConnectionPool:
maxConnections: 700
httpConnectionPool:
http1MaxPendingRequests: 2048
http2MaxRequests: 2048
"""
resil_file_name = f"{self._testMethodName}_containerapp.yml"
write_test_file(resil_file_name, resil_yaml_text)
self.cmd(f'containerapp resiliency create -n {resil_name} --container-app {ca_name} -g {resource_group} --yaml {resil_file_name}', checks=[
# HTTP Retry Policy
JMESPathCheck(
"properties.httpRetryPolicy.matches.errors[0]", "5xx"),
JMESPathCheck(
"properties.httpRetryPolicy.matches.errors[1]", "connect-failure"),
JMESPathCheck(
"properties.httpRetryPolicy.matches.errors[2]", "reset"),
JMESPathCheck(
"properties.httpRetryPolicy.matches.errors[3]", "retriable-headers"),
JMESPathCheck(
"properties.httpRetryPolicy.matches.errors[4]", "retriable-status-codes"),
JMESPathCheck(
"properties.httpRetryPolicy.matches.headers[0].header", "X-Content-Type"),
JMESPathCheck(
"properties.httpRetryPolicy.matches.headers[0].match.prefixMatch", "GOATS"),
JMESPathCheck(
"properties.httpRetryPolicy.matches.httpStatusCodes[0]", "502"),
JMESPathCheck(
"properties.httpRetryPolicy.matches.httpStatusCodes[1]", "503"),
JMESPathCheck("properties.httpRetryPolicy.maxRetries", "15"),
JMESPathCheck(
"properties.httpRetryPolicy.retryBackOff.initialDelayInMilliseconds", "5000"),
JMESPathCheck(
"properties.httpRetryPolicy.retryBackOff.maxIntervalInMilliseconds", "50000"),
# TCP Retry Policy
JMESPathCheck("properties.tcpRetryPolicy.maxConnectAttempts", "8"),
# Circuit Breaker Policy
JMESPathCheck(
"properties.circuitBreakerPolicy.consecutiveErrors", "15"),
JMESPathCheck(
"properties.circuitBreakerPolicy.intervalInSeconds", "15"),
JMESPathCheck(
"properties.circuitBreakerPolicy.maxEjectionPercent", "60"),
# TCP Connection Pool
JMESPathCheck(
"properties.tcpConnectionPool.maxConnections", "700"),
# HTTP Connection Pool
JMESPathCheck(
"properties.httpConnectionPool.httP1MaxPendingRequests", "2048"),
JMESPathCheck(
"properties.httpConnectionPool.httP2MaxRequests", "2048"),
# Timeout Policy
JMESPathCheck(
"properties.timeoutPolicy.responseTimeoutInSeconds", "25"),
JMESPathCheck(
"properties.timeoutPolicy.connectionTimeoutInSeconds", "15")
])
clean_up_test_file(resil_file_name)
# Update resiliency using yaml
resil_yaml_text = f"""
tcpConnectionPool:
maxConnections: 100
"""
resil_file_name = f"{self._testMethodName}_containerapp.yml"
write_test_file(resil_file_name, resil_yaml_text)
self.cmd(f'containerapp resiliency update -n {resil_name} --container-app {ca_name} -g {resource_group} --yaml {resil_file_name}', checks=[
JMESPathCheck(
"properties.tcpConnectionPool.maxConnections", "100"),
])
clean_up_test_file(resil_file_name)
|
class ContainerappResiliencyTests(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="eastus2")
def test_containerapp_resiliency(self, resource_group):
pass
| 4 | 0 | 167 | 24 | 119 | 24 | 1 | 0.2 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 170 | 24 | 122 | 12 | 118 | 24 | 40 | 11 | 38 | 1 | 1 | 0 | 1 |
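The resiliency test above round-trips its policy YAML through a temporary file via write_test_file and clean_up_test_file. Those helpers come from the extension's shared test utilities; a minimal sketch of what they plausibly do (an assumption, not the verbatim implementation):

import os

def write_test_file(file_name, text):
    # Persist the test payload (here, the resiliency YAML) to a local file.
    with open(file_name, "w", encoding="utf-8") as f:
        f.write(text)

def clean_up_test_file(file_name):
    # Best-effort removal of the temporary file once the test is done.
    if os.path.exists(file_name):
        os.remove(file_name)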
10,876 |
Azure/azure-cli-extensions
|
src/containerapp/azext_containerapp/tests/latest/test_containerapp_create_update_with_yaml.py
|
azext_containerapp.tests.latest.test_containerapp_create_update_with_yaml.ContainerappYamlTests
|
class ContainerappYamlTests(ScenarioTest):
def __init__(self, *arg, **kwargs):
super().__init__(*arg, random_config_dir=True, **kwargs)
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_preview_create_with_environment_id(self, resource_group):
# MSI is not available in North Central US (Stage); if TEST_LOCATION is "northcentralusstage", use eastus as the location
location = TEST_LOCATION
if format_location(location) == format_location(STAGE_LOCATION):
location = "eastus"
self.cmd('configure --defaults location={}'.format(location))
env1 = self.create_random_name(prefix='env1', length=24)
env2 = self.create_random_name(prefix='env2', length=24)
app = self.create_random_name(prefix='yaml1', length=24)
create_containerapp_env(self, env1, resource_group)
containerapp_env1 = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env1)).get_output_in_json()
create_containerapp_env(self, env2, resource_group)
containerapp_env2 = self.cmd(
'containerapp env show -g {} -n {}'.format(resource_group, env2)).get_output_in_json()
user_identity_name = self.create_random_name(
prefix='containerapp-user', length=24)
user_identity = self.cmd(
'identity create -g {} -n {}'.format(resource_group, user_identity_name)).get_output_in_json()
user_identity_id = user_identity['id']
# the environmentId in --yaml takes precedence; a warning is emitted for the conflicting value resolved from --environment
containerapp_yaml_text = f"""
location: {location}
type: Microsoft.App/containerApps
tags:
tagname: value
properties:
environmentId: {containerapp_env1["id"]}
configuration:
activeRevisionsMode: Multiple
ingress:
external: false
additionalPortMappings:
- external: false
targetPort: 12345
- external: false
targetPort: 9090
exposedPort: 23456
allowInsecure: false
targetPort: 80
traffic:
- latestRevision: true
weight: 100
transport: Auto
ipSecurityRestrictions:
- name: name
ipAddressRange: "1.1.1.1/10"
action: "Allow"
template:
revisionSuffix: myrevision
terminationGracePeriodSeconds: 90
containers:
- image: nginx
name: nginx
env:
- name: HTTP_PORT
value: 80
command:
- npm
- start
resources:
cpu: 0.5
memory: 1Gi
scale:
minReplicas: 1
maxReplicas: 3
rules:
- http:
auth:
- secretRef: secretref
triggerParameter: trigger
metadata:
concurrentRequests: '50'
key: value
name: http-scale-rule
identity:
type: UserAssigned
userAssignedIdentities:
{user_identity_id}: {{}}
"""
containerapp_file_name = f"{self._testMethodName}_containerapp.yml"
write_test_file(containerapp_file_name, containerapp_yaml_text)
self.cmd(
f'containerapp create -n {app} -g {resource_group} --environment {env2} --yaml {containerapp_file_name}')
self.cmd(f'containerapp show -g {resource_group} -n {app}', checks=[
JMESPathCheck("properties.provisioningState", "Succeeded"),
JMESPathCheck("properties.environmentId", containerapp_env1["id"]),
JMESPathCheck("properties.configuration.ingress.external", False),
JMESPathCheck(
"properties.configuration.ingress.additionalPortMappings[0].external", False),
JMESPathCheck(
"properties.configuration.ingress.additionalPortMappings[0].targetPort", 12345),
JMESPathCheck(
"properties.configuration.ingress.additionalPortMappings[1].external", False),
JMESPathCheck(
"properties.configuration.ingress.additionalPortMappings[1].targetPort", 9090),
JMESPathCheck(
"properties.configuration.ingress.additionalPortMappings[1].exposedPort", 23456),
JMESPathCheck(
"properties.configuration.ingress.ipSecurityRestrictions[0].name", "name"),
JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].ipAddressRange",
"1.1.1.1/10"),
JMESPathCheck(
"properties.configuration.ingress.ipSecurityRestrictions[0].action", "Allow"),
JMESPathCheck("properties.environmentId", containerapp_env1["id"]),
JMESPathCheck("properties.template.revisionSuffix", "myrevision"),
JMESPathCheck(
"properties.template.terminationGracePeriodSeconds", 90),
JMESPathCheck("properties.template.containers[0].name", "nginx"),
JMESPathCheck("properties.template.scale.minReplicas", 1),
JMESPathCheck("properties.template.scale.maxReplicas", 3),
JMESPathCheck(
"properties.template.scale.rules[0].name", "http-scale-rule"),
JMESPathCheck(
"properties.template.scale.rules[0].http.metadata.concurrentRequests", "50"),
JMESPathCheck(
"properties.template.scale.rules[0].http.metadata.key", "value"),
JMESPathCheck(
"properties.template.scale.rules[0].http.auth[0].triggerParameter", "trigger"),
JMESPathCheck(
"properties.template.scale.rules[0].http.auth[0].secretRef", "secretref"),
])
containerapp_yaml_text = f"""
location: {location}
type: Microsoft.App/containerApps
tags:
tagname: value
properties:
configuration:
activeRevisionsMode: Multiple
ingress:
external: false
additionalPortMappings:
- external: false
targetPort: 12345
- external: false
targetPort: 9090
exposedPort: 23456
allowInsecure: false
targetPort: 80
traffic:
- latestRevision: true
weight: 100
transport: Auto
ipSecurityRestrictions:
- name: name
ipAddressRange: "1.1.1.1/10"
action: "Allow"
template:
revisionSuffix: myrevision
terminationGracePeriodSeconds: 90
containers:
- image: nginx
name: nginx
env:
- name: HTTP_PORT
value: 80
command:
- npm
- start
resources:
cpu: 0.5
memory: 1Gi
scale:
minReplicas: 1
maxReplicas: 3
rules:
- http:
auth:
- secretRef: secretref
triggerParameter: trigger
metadata:
concurrentRequests: '50'
key: value
name: http-scale-rule
identity:
type: UserAssigned
userAssignedIdentities:
{user_identity_id}: {{}}
"""
write_test_file(containerapp_file_name, containerapp_yaml_text)
app2 = self.create_random_name(prefix='yaml2', length=24)
self.cmd(
f'containerapp create -n {app2} -g {resource_group} --environment {env2} --yaml {containerapp_file_name}')
self.cmd(f'containerapp show -g {resource_group} -n {app2}', checks=[
JMESPathCheck("properties.provisioningState", "Succeeded"),
JMESPathCheck("properties.environmentId", containerapp_env2["id"]),
JMESPathCheck("properties.configuration.ingress.external", False),
JMESPathCheck(
"properties.configuration.ingress.additionalPortMappings[0].external", False),
JMESPathCheck(
"properties.configuration.ingress.additionalPortMappings[0].targetPort", 12345),
JMESPathCheck(
"properties.configuration.ingress.additionalPortMappings[1].external", False),
JMESPathCheck(
"properties.configuration.ingress.additionalPortMappings[1].targetPort", 9090),
JMESPathCheck(
"properties.configuration.ingress.additionalPortMappings[1].exposedPort", 23456),
JMESPathCheck(
"properties.configuration.ingress.ipSecurityRestrictions[0].name", "name"),
JMESPathCheck("properties.configuration.ingress.ipSecurityRestrictions[0].ipAddressRange",
"1.1.1.1/10"),
JMESPathCheck(
"properties.configuration.ingress.ipSecurityRestrictions[0].action", "Allow"),
JMESPathCheck("properties.environmentId", containerapp_env2["id"]),
JMESPathCheck("properties.template.revisionSuffix", "myrevision"),
JMESPathCheck(
"properties.template.terminationGracePeriodSeconds", 90),
JMESPathCheck("properties.template.containers[0].name", "nginx"),
JMESPathCheck("properties.template.scale.minReplicas", 1),
JMESPathCheck("properties.template.scale.maxReplicas", 3),
JMESPathCheck(
"properties.template.scale.rules[0].name", "http-scale-rule"),
JMESPathCheck(
"properties.template.scale.rules[0].http.metadata.concurrentRequests", "50"),
JMESPathCheck(
"properties.template.scale.rules[0].http.metadata.key", "value"),
JMESPathCheck(
"properties.template.scale.rules[0].http.auth[0].triggerParameter", "trigger"),
JMESPathCheck(
"properties.template.scale.rules[0].http.auth[0].secretRef", "secretref"),
])
clean_up_test_file(containerapp_file_name)
|
class ContainerappYamlTests(ScenarioTest):
def __init__(self, *arg, **kwargs):
pass
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location="westeurope")
def test_containerapp_preview_create_with_environment_id(self, resource_group):
pass
| 5 | 0 | 103 | 5 | 97 | 1 | 2 | 0.01 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 2 | 210 | 11 | 197 | 16 | 192 | 2 | 29 | 15 | 26 | 2 | 1 | 1 | 3 |
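The YAML test above pins down a precedence rule: when the payload passed via --yaml contains an environmentId, it wins over the --environment flag (app, first payload), and only when the YAML omits it does the flag's environment apply (app2, second payload). A minimal sketch of that rule outside the CLI, assuming PyYAML is available; the function name is hypothetical:

import yaml  # PyYAML

def effective_environment_id(yaml_text, cli_env_id):
    # The environmentId embedded in the YAML payload, when present,
    # takes precedence over the id resolved from --environment.
    doc = yaml.safe_load(yaml_text) or {}
    return (doc.get("properties") or {}).get("environmentId") or cli_env_id

With the first payload this returns containerapp_env1's id even though env2 was passed on the command line, which is exactly what the JMESPath checks assert.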
10,877 |
Azure/azure-cli-extensions
|
src/containerapp/azext_containerapp/tests/latest/test_containerapp_create_update_with_source.py
|
azext_containerapp.tests.latest.test_containerapp_create_update_with_source.ContainerAppCreateTest
|
class ContainerAppCreateTest(ScenarioTest):
# These tests should have the `@live_only` attribute because they
# require a docker push operation to push the image built as part of the test to the container registry
# and would not execute from the CI pipeline since docker is not installed in the CI.
@live_only()
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_with_Dockerfile_e2e(self, resource_group):
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_dockerfile"))
ingress = 'external'
target_port = '80'
create_and_verify_containerapp_create_and_update(
self, resource_group=resource_group, source_path=source_path, ingress=ingress, target_port=target_port)
@live_only()
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_with_buildpack_e2e(self, resource_group):
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_bullseye_buildpack_net7"))
ingress = 'external'
target_port = '8080'
create_and_verify_containerapp_create_and_update(
self, resource_group=resource_group, source_path=source_path, ingress=ingress, target_port=target_port)
@live_only()
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_and_image_e2e(self, resource_group):
image = "mcr.microsoft.com/dotnet/runtime:7.0"
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_dockerfile"))
create_and_verify_containerapp_create_and_update(
self, resource_group=resource_group, image=image, source_path=source_path)
@live_only()
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_with_acr_task_e2e(self, resource_group):
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_acr_task"))
ingress = 'external'
target_port = '8080'
create_and_verify_containerapp_create_and_update(
self, resource_group=resource_group, source_path=source_path, ingress=ingress, target_port=target_port)
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_and_repo_e2e(self, resource_group):
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_dockerfile"))
repo = "https://github.com/test/repo"
err = ("Usage error: --source and --repo cannot be used together. Can either deploy from a local directory or a GitHub repository")
verify_containerapp_create_exception(
self, resource_group=resource_group, err=err, source_path=source_path, repo=repo)
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_and_yaml_e2e(self, resource_group):
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_dockerfile"))
yaml = "./test.yaml"
err = ("Usage error: --source or --repo cannot be used with --yaml together. Can either deploy from a local directory or provide a yaml file")
verify_containerapp_create_exception(
self, resource_group=resource_group, err=err, source_path=source_path, yaml=yaml)
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_repo_and_yaml_e2e(self, resource_group):
repo = "https://github.com/test/repo"
yaml = "./test.yaml"
err = ("Usage error: --source or --repo cannot be used with --yaml together. Can either deploy from a local directory or provide a yaml file")
verify_containerapp_create_exception(
self, resource_group=resource_group, err=err, repo=repo, yaml=yaml)
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_repo_and_connected_environment_e2e(self, resource_group):
repo = "https://github.com/test/repo"
err = ("Usage error: --source or --repo cannot be used with --environment-type connectedEnvironment together. Please use --environment-type managedEnvironment")
verify_containerapp_create_exception(
self, resource_group=resource_group, err=err, repo=repo, environment_type="connected")
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_and_connected_environment_e2e(self, resource_group):
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_dockerfile"))
err = ("Usage error: --source or --repo cannot be used with --environment-type connectedEnvironment together. Please use --environment-type managedEnvironment")
verify_containerapp_create_exception(
self, resource_group=resource_group, err=err, source_path=source_path, environment_type="connected")
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_repo_with_non_ACR_registry_server_e2e(self, resource_group):
repo = "https://github.com/test/repo"
registry_server = "docker.io"
registry_user = "test"
registry_pass = "test"
err = (
"Usage error: --registry-server: expected an ACR registry (*.azurecr.io) for --repo")
verify_containerapp_create_exception(self, resource_group, err=err, repo=repo,
registry_server=registry_server, registry_user=registry_user, registry_pass=registry_pass)
# We have to use @live_only() here because the cloud builder and build resource names are generated randomly,
# so no matching recorded request can be found for the builder/build ARM requests
@live_only()
@ResourceGroupPreparer()
def test_containerapp_create_and_update_with_env_vars_e2e(self, resource_group):
containerapp_name = self.create_random_name(prefix='aca', length=24)
source_path = os.path.join(TEST_DIR, os.path.join(
"data", "source_built_using_source_to_cloud_dotnet"))
create_and_verify_containerapp_create_and_update_env_vars(
self, resource_group=resource_group, name=containerapp_name, source_path=source_path)
|
class ContainerAppCreateTest(ScenarioTest):
@live_only()
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_with_Dockerfile_e2e(self, resource_group):
pass
@live_only()
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_with_buildpack_e2e(self, resource_group):
pass
@live_only()
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_and_image_e2e(self, resource_group):
pass
@live_only()
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_with_acr_task_e2e(self, resource_group):
pass
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_and_repo_e2e(self, resource_group):
pass
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_and_yaml_e2e(self, resource_group):
pass
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_repo_and_yaml_e2e(self, resource_group):
pass
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_repo_and_connected_environment_e2e(self, resource_group):
pass
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_source_and_connected_environment_e2e(self, resource_group):
pass
@ResourceGroupPreparer(location="eastus")
def test_containerapp_create_repo_with_non_ACR_registry_server_e2e(self, resource_group):
pass
@live_only()
@ResourceGroupPreparer()
def test_containerapp_create_and_update_with_env_vars_e2e(self, resource_group):
pass
| 28 | 0 | 5 | 0 | 5 | 0 | 1 | 0.07 | 1 | 0 | 0 | 0 | 11 | 0 | 11 | 11 | 85 | 10 | 70 | 54 | 42 | 5 | 54 | 43 | 42 | 1 | 1 | 0 | 11 |
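Most of the negative tests above assert mutual-exclusion rules between --source, --repo, --yaml, the environment type, and the registry server. A hypothetical condensation of those rules (the real validation lives inside the CLI's parameter validators, not in this helper):

def validate_create_args(source=None, repo=None, yaml=None,
                         environment_type=None, registry_server=None):
    # Each branch mirrors one usage error asserted by the tests above.
    if source and repo:
        raise ValueError("--source and --repo cannot be used together")
    if (source or repo) and yaml:
        raise ValueError("--source or --repo cannot be used with --yaml")
    if (source or repo) and environment_type == "connectedEnvironment":
        raise ValueError("--source/--repo require --environment-type managedEnvironment")
    if repo and registry_server and not registry_server.endswith(".azurecr.io"):
        raise ValueError("--registry-server must be an ACR registry (*.azurecr.io) for --repo")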
10,878 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.FactoryRepoUpdate
|
class FactoryRepoUpdate(_serialization.Model):
"""Factory's git repo information.
:ivar factory_resource_id: The factory resource id.
:vartype factory_resource_id: str
:ivar repo_configuration: Git repo information of the factory.
:vartype repo_configuration: ~azure.mgmt.datafactory.models.FactoryRepoConfiguration
"""
_attribute_map = {
"factory_resource_id": {"key": "factoryResourceId", "type": "str"},
"repo_configuration": {"key": "repoConfiguration", "type": "FactoryRepoConfiguration"},
}
def __init__(
self,
*,
factory_resource_id: Optional[str] = None,
repo_configuration: Optional["_models.FactoryRepoConfiguration"] = None,
**kwargs: Any
) -> None:
"""
:keyword factory_resource_id: The factory resource id.
:paramtype factory_resource_id: str
:keyword repo_configuration: Git repo information of the factory.
:paramtype repo_configuration: ~azure.mgmt.datafactory.models.FactoryRepoConfiguration
"""
super().__init__(**kwargs)
self.factory_resource_id = factory_resource_id
self.repo_configuration = repo_configuration
|
class FactoryRepoUpdate(_serialization.Model):
'''Factory's git repo information.
:ivar factory_resource_id: The factory resource id.
:vartype factory_resource_id: str
:ivar repo_configuration: Git repo information of the factory.
:vartype repo_configuration: ~azure.mgmt.datafactory.models.FactoryRepoConfiguration
'''
def __init__(
self,
*,
factory_resource_id: Optional[str] = None,
repo_configuration: Optional["_models.FactoryRepoConfiguration"] = None,
**kwargs: Any
) -> None:
'''
:keyword factory_resource_id: The factory resource id.
:paramtype factory_resource_id: str
:keyword repo_configuration: Git repo information of the factory.
:paramtype repo_configuration: ~azure.mgmt.datafactory.models.FactoryRepoConfiguration
'''
pass
| 2 | 2 | 16 | 0 | 10 | 6 | 1 | 0.8 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 16 | 30 | 3 | 15 | 11 | 7 | 12 | 6 | 5 | 4 | 1 | 2 | 0 | 1 |
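A minimal usage sketch for FactoryRepoUpdate (the resource id is a placeholder; serialize() is the wire-format serializer these msrest-style generated models inherit):

from azext_datafactory.vendored_sdks.datafactory import models

repo_update = models.FactoryRepoUpdate(
    factory_resource_id=(
        "/subscriptions/00000000-0000-0000-0000-000000000000/"
        "resourceGroups/rg/providers/Microsoft.DataFactory/factories/myFactory"
    ),
    repo_configuration=None,  # e.g. a FactoryGitHubConfiguration instance
)
print(repo_update.serialize())  # dict keyed by the wire names, e.g. factoryResourceId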
10,879 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.GitHubAccessTokenRequest
|
class GitHubAccessTokenRequest(_serialization.Model):
"""Get GitHub access token request definition.
All required parameters must be populated in order to send to server.
:ivar git_hub_access_code: GitHub access code. Required.
:vartype git_hub_access_code: str
:ivar git_hub_client_id: GitHub application client ID.
:vartype git_hub_client_id: str
:ivar git_hub_client_secret: GitHub bring your own app client secret information.
:vartype git_hub_client_secret: ~azure.mgmt.datafactory.models.GitHubClientSecret
:ivar git_hub_access_token_base_url: GitHub access token base URL. Required.
:vartype git_hub_access_token_base_url: str
"""
_validation = {
"git_hub_access_code": {"required": True},
"git_hub_access_token_base_url": {"required": True},
}
_attribute_map = {
"git_hub_access_code": {"key": "gitHubAccessCode", "type": "str"},
"git_hub_client_id": {"key": "gitHubClientId", "type": "str"},
"git_hub_client_secret": {"key": "gitHubClientSecret", "type": "GitHubClientSecret"},
"git_hub_access_token_base_url": {"key": "gitHubAccessTokenBaseUrl", "type": "str"},
}
def __init__(
self,
*,
git_hub_access_code: str,
git_hub_access_token_base_url: str,
git_hub_client_id: Optional[str] = None,
git_hub_client_secret: Optional["_models.GitHubClientSecret"] = None,
**kwargs: Any
) -> None:
"""
:keyword git_hub_access_code: GitHub access code. Required.
:paramtype git_hub_access_code: str
:keyword git_hub_client_id: GitHub application client ID.
:paramtype git_hub_client_id: str
:keyword git_hub_client_secret: GitHub bring your own app client secret information.
:paramtype git_hub_client_secret: ~azure.mgmt.datafactory.models.GitHubClientSecret
:keyword git_hub_access_token_base_url: GitHub access token base URL. Required.
:paramtype git_hub_access_token_base_url: str
"""
super().__init__(**kwargs)
self.git_hub_access_code = git_hub_access_code
self.git_hub_client_id = git_hub_client_id
self.git_hub_client_secret = git_hub_client_secret
self.git_hub_access_token_base_url = git_hub_access_token_base_url
|
class GitHubAccessTokenRequest(_serialization.Model):
'''Get GitHub access token request definition.
All required parameters must be populated in order to send to server.
:ivar git_hub_access_code: GitHub access code. Required.
:vartype git_hub_access_code: str
:ivar git_hub_client_id: GitHub application client ID.
:vartype git_hub_client_id: str
:ivar git_hub_client_secret: GitHub bring your own app client secret information.
:vartype git_hub_client_secret: ~azure.mgmt.datafactory.models.GitHubClientSecret
:ivar git_hub_access_token_base_url: GitHub access token base URL. Required.
:vartype git_hub_access_token_base_url: str
'''
def __init__(
self,
*,
git_hub_access_code: str,
git_hub_access_token_base_url: str,
git_hub_client_id: Optional[str] = None,
git_hub_client_secret: Optional["_models.GitHubClientSecret"] = None,
**kwargs: Any
) -> None:
'''
:keyword git_hub_access_code: GitHub access code. Required.
:paramtype git_hub_access_code: str
:keyword git_hub_client_id: GitHub application client ID.
:paramtype git_hub_client_id: str
:keyword git_hub_client_secret: GitHub bring your own app client secret information.
:paramtype git_hub_client_secret: ~azure.mgmt.datafactory.models.GitHubClientSecret
:keyword git_hub_access_token_base_url: GitHub access token base URL. Required.
:paramtype git_hub_access_token_base_url: str
'''
pass
| 2 | 2 | 24 | 0 | 14 | 10 | 1 | 0.84 | 1 | 3 | 0 | 0 | 1 | 4 | 1 | 16 | 51 | 5 | 25 | 16 | 15 | 21 | 9 | 8 | 7 | 1 | 2 | 0 | 1 |
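Constructing the request is a matter of supplying the two required keywords; everything else is optional. A sketch with placeholder values:

from azext_datafactory.vendored_sdks.datafactory import models

token_request = models.GitHubAccessTokenRequest(
    git_hub_access_code="<oauth-code-from-github>",      # required
    git_hub_access_token_base_url="https://github.com",  # required
    git_hub_client_id="<byoa-app-client-id>",            # optional
)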
10,880 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.GitHubAccessTokenResponse
|
class GitHubAccessTokenResponse(_serialization.Model):
"""Get GitHub access token response definition.
:ivar git_hub_access_token: GitHub access token.
:vartype git_hub_access_token: str
"""
_attribute_map = {
"git_hub_access_token": {"key": "gitHubAccessToken", "type": "str"},
}
def __init__(self, *, git_hub_access_token: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword git_hub_access_token: GitHub access token.
:paramtype git_hub_access_token: str
"""
super().__init__(**kwargs)
self.git_hub_access_token = git_hub_access_token
|
class GitHubAccessTokenResponse(_serialization.Model):
'''Get GitHub access token response definition.
:ivar git_hub_access_token: GitHub access token.
:vartype git_hub_access_token: str
'''
def __init__(self, *, git_hub_access_token: Optional[str] = None, **kwargs: Any) -> None:
'''
:keyword git_hub_access_token: GitHub access token.
:paramtype git_hub_access_token: str
'''
pass
| 2 | 2 | 7 | 0 | 3 | 4 | 1 | 1.14 | 1 | 3 | 0 | 0 | 1 | 1 | 1 | 16 | 18 | 3 | 7 | 4 | 5 | 8 | 5 | 4 | 3 | 1 | 2 | 0 | 1 |
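The response model carries a single field; the deserialize classmethod inherited from the shared serialization base turns a wire-format dict back into the model. A sketch with a fabricated token value:

from azext_datafactory.vendored_sdks.datafactory import models

payload = {"gitHubAccessToken": "gho_exampletoken"}
token_response = models.GitHubAccessTokenResponse.deserialize(payload)
print(token_response.git_hub_access_token)  # gho_exampletoken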
10,881 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.GitHubClientSecret
|
class GitHubClientSecret(_serialization.Model):
"""Client secret information for factory's bring your own app repository configuration.
:ivar byoa_secret_akv_url: Bring your own app client secret AKV URL.
:vartype byoa_secret_akv_url: str
:ivar byoa_secret_name: Bring your own app client secret name in AKV.
:vartype byoa_secret_name: str
"""
_attribute_map = {
"byoa_secret_akv_url": {"key": "byoaSecretAkvUrl", "type": "str"},
"byoa_secret_name": {"key": "byoaSecretName", "type": "str"},
}
def __init__(
self, *, byoa_secret_akv_url: Optional[str] = None, byoa_secret_name: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword byoa_secret_akv_url: Bring your own app client secret AKV URL.
:paramtype byoa_secret_akv_url: str
:keyword byoa_secret_name: Bring your own app client secret name in AKV.
:paramtype byoa_secret_name: str
"""
super().__init__(**kwargs)
self.byoa_secret_akv_url = byoa_secret_akv_url
self.byoa_secret_name = byoa_secret_name
|
class GitHubClientSecret(_serialization.Model):
'''Client secret information for factory's bring your own app repository configuration.
:ivar byoa_secret_akv_url: Bring your own app client secret AKV URL.
:vartype byoa_secret_akv_url: str
:ivar byoa_secret_name: Bring your own app client secret name in AKV.
:vartype byoa_secret_name: str
'''
def __init__(
self, *, byoa_secret_akv_url: Optional[str] = None, byoa_secret_name: Optional[str] = None, **kwargs: Any
) -> None:
'''
:keyword byoa_secret_akv_url: Bring your own app client secret AKV URL.
:paramtype byoa_secret_akv_url: str
:keyword byoa_secret_name: Bring your own app client secret name in AKV.
:paramtype byoa_secret_name: str
'''
pass
| 2 | 2 | 12 | 0 | 6 | 6 | 1 | 1.09 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 16 | 26 | 3 | 11 | 7 | 7 | 12 | 6 | 5 | 4 | 1 | 2 | 0 | 1 |
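GitHubClientSecret exists to be composed into GitHubAccessTokenRequest for bring-your-own-app flows. A sketch wiring the two together (vault URL and secret name are placeholders):

from azext_datafactory.vendored_sdks.datafactory import models

byoa_secret = models.GitHubClientSecret(
    byoa_secret_akv_url="https://myvault.vault.azure.net",
    byoa_secret_name="github-app-client-secret",
)
token_request = models.GitHubAccessTokenRequest(
    git_hub_access_code="<oauth-code>",
    git_hub_access_token_base_url="https://github.com",
    git_hub_client_secret=byoa_secret,
)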
10,882 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.HDInsightHiveActivity
|
class HDInsightHiveActivity(ExecutionActivity):
"""HDInsight Hive activity type.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar linked_service_name: Linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar policy: Activity policy.
:vartype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:ivar storage_linked_services: Storage linked service references.
:vartype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:ivar arguments: User specified arguments to HDInsightActivity.
:vartype arguments: list[JSON]
:ivar get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:vartype get_debug_info: str or ~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:ivar script_path: Script path. Type: string (or Expression with resultType string).
:vartype script_path: JSON
:ivar script_linked_service: Script linked service reference.
:vartype script_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar defines: Allows user to specify defines for Hive job request.
:vartype defines: dict[str, JSON]
:ivar variables: User specified arguments under hivevar namespace.
:vartype variables: dict[str, JSON]
:ivar query_timeout: Query timeout value (in minutes). Effective when the HDInsight cluster is
with ESP (Enterprise Security Package).
:vartype query_timeout: int
"""
_validation = {
"name": {"required": True},
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"state": {"key": "state", "type": "str"},
"on_inactive_mark_as": {"key": "onInactiveMarkAs", "type": "str"},
"depends_on": {"key": "dependsOn", "type": "[ActivityDependency]"},
"user_properties": {"key": "userProperties", "type": "[UserProperty]"},
"linked_service_name": {"key": "linkedServiceName", "type": "LinkedServiceReference"},
"policy": {"key": "policy", "type": "ActivityPolicy"},
"storage_linked_services": {"key": "typeProperties.storageLinkedServices", "type": "[LinkedServiceReference]"},
"arguments": {"key": "typeProperties.arguments", "type": "[object]"},
"get_debug_info": {"key": "typeProperties.getDebugInfo", "type": "str"},
"script_path": {"key": "typeProperties.scriptPath", "type": "object"},
"script_linked_service": {"key": "typeProperties.scriptLinkedService", "type": "LinkedServiceReference"},
"defines": {"key": "typeProperties.defines", "type": "{object}"},
"variables": {"key": "typeProperties.variables", "type": "{object}"},
"query_timeout": {"key": "typeProperties.queryTimeout", "type": "int"},
}
def __init__(
self,
*,
name: str,
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
policy: Optional["_models.ActivityPolicy"] = None,
storage_linked_services: Optional[List["_models.LinkedServiceReference"]] = None,
arguments: Optional[List[JSON]] = None,
get_debug_info: Optional[Union[str, "_models.HDInsightActivityDebugInfoOption"]] = None,
script_path: Optional[JSON] = None,
script_linked_service: Optional["_models.LinkedServiceReference"] = None,
defines: Optional[Dict[str, JSON]] = None,
variables: Optional[Dict[str, JSON]] = None,
query_timeout: Optional[int] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword linked_service_name: Linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword policy: Activity policy.
:paramtype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:keyword storage_linked_services: Storage linked service references.
:paramtype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:keyword arguments: User specified arguments to HDInsightActivity.
:paramtype arguments: list[JSON]
:keyword get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:paramtype get_debug_info: str or
~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:keyword script_path: Script path. Type: string (or Expression with resultType string).
:paramtype script_path: JSON
:keyword script_linked_service: Script linked service reference.
:paramtype script_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword defines: Allows user to specify defines for Hive job request.
:paramtype defines: dict[str, JSON]
:keyword variables: User specified arguments under hivevar namespace.
:paramtype variables: dict[str, JSON]
:keyword query_timeout: Query timeout value (in minutes). Effective when the HDInsight cluster
is with ESP (Enterprise Security Package).
:paramtype query_timeout: int
"""
super().__init__(
additional_properties=additional_properties,
name=name,
description=description,
state=state,
on_inactive_mark_as=on_inactive_mark_as,
depends_on=depends_on,
user_properties=user_properties,
linked_service_name=linked_service_name,
policy=policy,
**kwargs
)
self.type: str = "HDInsightHive"
self.storage_linked_services = storage_linked_services
self.arguments = arguments
self.get_debug_info = get_debug_info
self.script_path = script_path
self.script_linked_service = script_linked_service
self.defines = defines
self.variables = variables
self.query_timeout = query_timeout
|
class HDInsightHiveActivity(ExecutionActivity):
'''HDInsight Hive activity type.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar linked_service_name: Linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar policy: Activity policy.
:vartype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:ivar storage_linked_services: Storage linked service references.
:vartype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:ivar arguments: User specified arguments to HDInsightActivity.
:vartype arguments: list[JSON]
:ivar get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:vartype get_debug_info: str or ~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:ivar script_path: Script path. Type: string (or Expression with resultType string).
:vartype script_path: JSON
:ivar script_linked_service: Script linked service reference.
:vartype script_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar defines: Allows user to specify defines for Hive job request.
:vartype defines: dict[str, JSON]
:ivar variables: User specified arguments under hivevar namespace.
:vartype variables: dict[str, JSON]
:ivar query_timeout: Query timeout value (in minutes). Effective when the HDInsight cluster is
with ESP (Enterprise Security Package).
:vartype query_timeout: int
'''
def __init__(
self,
*,
name: str,
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
policy: Optional["_models.ActivityPolicy"] = None,
storage_linked_services: Optional[List["_models.LinkedServiceReference"]] = None,
arguments: Optional[List[JSON]] = None,
get_debug_info: Optional[Union[str, "_models.HDInsightActivityDebugInfoOption"]] = None,
script_path: Optional[JSON] = None,
script_linked_service: Optional["_models.LinkedServiceReference"] = None,
defines: Optional[Dict[str, JSON]] = None,
variables: Optional[Dict[str, JSON]] = None,
query_timeout: Optional[int] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword linked_service_name: Linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword policy: Activity policy.
:paramtype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:keyword storage_linked_services: Storage linked service references.
:paramtype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:keyword arguments: User specified arguments to HDInsightActivity.
:paramtype arguments: list[JSON]
:keyword get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:paramtype get_debug_info: str or
~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:keyword script_path: Script path. Type: string (or Expression with resultType string).
:paramtype script_path: JSON
:keyword script_linked_service: Script linked service reference.
:paramtype script_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword defines: Allows user to specify defines for Hive job request.
:paramtype defines: dict[str, JSON]
:keyword variables: User specified arguments under hivevar namespace.
:paramtype variables: dict[str, JSON]
:keyword query_timeout: Query timeout value (in minutes). Effective when the HDInsight cluster
is with ESP (Enterprise Security Package).
:paramtype query_timeout: int
'''
pass
| 2 | 2 | 85 | 0 | 43 | 42 | 1 | 1.26 | 1 | 4 | 0 | 0 | 1 | 9 | 1 | 18 | 159 | 5 | 68 | 34 | 45 | 86 | 14 | 13 | 12 | 1 | 4 | 0 | 1 |
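Only name is required on HDInsightHiveActivity; the Hive-specific settings map onto typeProperties.* on the wire per the _attribute_map above. A sketch with placeholder names (the LinkedServiceReference usage is an assumption based on the reference fields documented above):

from azext_datafactory.vendored_sdks.datafactory import models

hive_activity = models.HDInsightHiveActivity(
    name="RunDailyHiveQuery",            # required
    script_path="adfscripts/daily.hql",  # Type: string (or Expression)
    script_linked_service=models.LinkedServiceReference(
        type="LinkedServiceReference",
        reference_name="AzureBlobStorageLS",
    ),
    defines={"inputDate": "2024-01-01"},
    get_debug_info="Failure",            # one of "None", "Always", "Failure"
)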
10,883 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.HDInsightLinkedService
|
class HDInsightLinkedService(LinkedService):
"""HDInsight linked service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar cluster_uri: HDInsight cluster URI. Type: string (or Expression with resultType string).
Required.
:vartype cluster_uri: JSON
:ivar user_name: HDInsight cluster user name. Type: string (or Expression with resultType
string).
:vartype user_name: JSON
:ivar password: HDInsight cluster password.
:vartype password: ~azure.mgmt.datafactory.models.SecretBase
:ivar linked_service_name: The Azure Storage linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar hcatalog_linked_service_name: A reference to the Azure SQL linked service that points to
the HCatalog database.
:vartype hcatalog_linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
:ivar is_esp_enabled: Specify if the HDInsight is created with ESP (Enterprise Security
Package). Type: Boolean.
:vartype is_esp_enabled: JSON
:ivar file_system: Specify the FileSystem if the main storage for the HDInsight is ADLS Gen2.
Type: string (or Expression with resultType string).
:vartype file_system: JSON
"""
_validation = {
"type": {"required": True},
"cluster_uri": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"version": {"key": "version", "type": "str"},
"connect_via": {"key": "connectVia", "type": "IntegrationRuntimeReference"},
"description": {"key": "description", "type": "str"},
"parameters": {"key": "parameters", "type": "{ParameterSpecification}"},
"annotations": {"key": "annotations", "type": "[object]"},
"cluster_uri": {"key": "typeProperties.clusterUri", "type": "object"},
"user_name": {"key": "typeProperties.userName", "type": "object"},
"password": {"key": "typeProperties.password", "type": "SecretBase"},
"linked_service_name": {"key": "typeProperties.linkedServiceName", "type": "LinkedServiceReference"},
"hcatalog_linked_service_name": {
"key": "typeProperties.hcatalogLinkedServiceName",
"type": "LinkedServiceReference",
},
"encrypted_credential": {"key": "typeProperties.encryptedCredential", "type": "str"},
"is_esp_enabled": {"key": "typeProperties.isEspEnabled", "type": "object"},
"file_system": {"key": "typeProperties.fileSystem", "type": "object"},
}
def __init__(
self,
*,
cluster_uri: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
user_name: Optional[JSON] = None,
password: Optional["_models.SecretBase"] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
hcatalog_linked_service_name: Optional["_models.LinkedServiceReference"] = None,
encrypted_credential: Optional[str] = None,
is_esp_enabled: Optional[JSON] = None,
file_system: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword cluster_uri: HDInsight cluster URI. Type: string (or Expression with resultType
string). Required.
:paramtype cluster_uri: JSON
:keyword user_name: HDInsight cluster user name. Type: string (or Expression with resultType
string).
:paramtype user_name: JSON
:keyword password: HDInsight cluster password.
:paramtype password: ~azure.mgmt.datafactory.models.SecretBase
:keyword linked_service_name: The Azure Storage linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword hcatalog_linked_service_name: A reference to the Azure SQL linked service that points
to the HCatalog database.
:paramtype hcatalog_linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
:keyword is_esp_enabled: Specify if the HDInsight is created with ESP (Enterprise Security
Package). Type: Boolean.
:paramtype is_esp_enabled: JSON
:keyword file_system: Specify the FileSystem if the main storage for the HDInsight is ADLS
Gen2. Type: string (or Expression with resultType string).
:paramtype file_system: JSON
"""
super().__init__(
additional_properties=additional_properties,
version=version,
connect_via=connect_via,
description=description,
parameters=parameters,
annotations=annotations,
**kwargs
)
self.type: str = "HDInsight"
self.cluster_uri = cluster_uri
self.user_name = user_name
self.password = password
self.linked_service_name = linked_service_name
self.hcatalog_linked_service_name = hcatalog_linked_service_name
self.encrypted_credential = encrypted_credential
self.is_esp_enabled = is_esp_enabled
self.file_system = file_system
|
class HDInsightLinkedService(LinkedService):
'''HDInsight linked service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar cluster_uri: HDInsight cluster URI. Type: string (or Expression with resultType string).
Required.
:vartype cluster_uri: JSON
:ivar user_name: HDInsight cluster user name. Type: string (or Expression with resultType
string).
:vartype user_name: JSON
:ivar password: HDInsight cluster password.
:vartype password: ~azure.mgmt.datafactory.models.SecretBase
:ivar linked_service_name: The Azure Storage linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar hcatalog_linked_service_name: A reference to the Azure SQL linked service that points to
the HCatalog database.
:vartype hcatalog_linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
:ivar is_esp_enabled: Specify if the HDInsight is created with ESP (Enterprise Security
Package). Type: Boolean.
:vartype is_esp_enabled: JSON
:ivar file_system: Specify the FileSystem if the main storage for the HDInsight is ADLS Gen2.
Type: string (or Expression with resultType string).
:vartype file_system: JSON
'''
def __init__(
self,
*,
cluster_uri: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
user_name: Optional[JSON] = None,
password: Optional["_models.SecretBase"] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
hcatalog_linked_service_name: Optional["_models.LinkedServiceReference"] = None,
encrypted_credential: Optional[str] = None,
is_esp_enabled: Optional[JSON] = None,
file_system: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword cluster_uri: HDInsight cluster URI. Type: string (or Expression with resultType
string). Required.
:paramtype cluster_uri: JSON
:keyword user_name: HDInsight cluster user name. Type: string (or Expression with resultType
string).
:paramtype user_name: JSON
:keyword password: HDInsight cluster password.
:paramtype password: ~azure.mgmt.datafactory.models.SecretBase
:keyword linked_service_name: The Azure Storage linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword hcatalog_linked_service_name: A reference to the Azure SQL linked service that points
to the HCatalog database.
:paramtype hcatalog_linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
:keyword is_esp_enabled: Specify if the HDInsight is created with ESP (Enterprise Security
Package). Type: Boolean.
:paramtype is_esp_enabled: JSON
:keyword file_system: Specify the FileSystem if the main storage for the HDInsight is ADLS
Gen2. Type: string (or Expression with resultType string).
:paramtype file_system: JSON
'''
pass
| 2 | 2 | 74 | 0 | 37 | 37 | 1 | 1.24 | 1 | 3 | 0 | 0 | 1 | 9 | 1 | 17 | 144 | 5 | 62 | 31 | 42 | 77 | 14 | 13 | 12 | 1 | 3 | 0 | 1 |
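cluster_uri is the only required keyword on HDInsightLinkedService. A sketch with placeholder credentials (SecureString is one concrete SecretBase subclass the SDK ships):

from azext_datafactory.vendored_sdks.datafactory import models

hdi_linked_service = models.HDInsightLinkedService(
    cluster_uri="https://mycluster.azurehdinsight.net",  # required
    user_name="clusteradmin",
    password=models.SecureString(value="<placeholder-password>"),
)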
10,884 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.HDInsightMapReduceActivity
|
class HDInsightMapReduceActivity(ExecutionActivity):
"""HDInsight MapReduce activity type.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar linked_service_name: Linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar policy: Activity policy.
:vartype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:ivar storage_linked_services: Storage linked service references.
:vartype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:ivar arguments: User specified arguments to HDInsightActivity.
:vartype arguments: list[JSON]
:ivar get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:vartype get_debug_info: str or ~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:ivar class_name: Class name. Type: string (or Expression with resultType string). Required.
:vartype class_name: JSON
:ivar jar_file_path: Jar path. Type: string (or Expression with resultType string). Required.
:vartype jar_file_path: JSON
:ivar jar_linked_service: Jar linked service reference.
:vartype jar_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar jar_libs: Jar libs.
:vartype jar_libs: list[JSON]
:ivar defines: Allows user to specify defines for the MapReduce job request.
:vartype defines: dict[str, JSON]
"""
_validation = {
"name": {"required": True},
"type": {"required": True},
"class_name": {"required": True},
"jar_file_path": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"state": {"key": "state", "type": "str"},
"on_inactive_mark_as": {"key": "onInactiveMarkAs", "type": "str"},
"depends_on": {"key": "dependsOn", "type": "[ActivityDependency]"},
"user_properties": {"key": "userProperties", "type": "[UserProperty]"},
"linked_service_name": {"key": "linkedServiceName", "type": "LinkedServiceReference"},
"policy": {"key": "policy", "type": "ActivityPolicy"},
"storage_linked_services": {"key": "typeProperties.storageLinkedServices", "type": "[LinkedServiceReference]"},
"arguments": {"key": "typeProperties.arguments", "type": "[object]"},
"get_debug_info": {"key": "typeProperties.getDebugInfo", "type": "str"},
"class_name": {"key": "typeProperties.className", "type": "object"},
"jar_file_path": {"key": "typeProperties.jarFilePath", "type": "object"},
"jar_linked_service": {"key": "typeProperties.jarLinkedService", "type": "LinkedServiceReference"},
"jar_libs": {"key": "typeProperties.jarLibs", "type": "[object]"},
"defines": {"key": "typeProperties.defines", "type": "{object}"},
}
def __init__(
self,
*,
name: str,
class_name: JSON,
jar_file_path: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
policy: Optional["_models.ActivityPolicy"] = None,
storage_linked_services: Optional[List["_models.LinkedServiceReference"]] = None,
arguments: Optional[List[JSON]] = None,
get_debug_info: Optional[Union[str, "_models.HDInsightActivityDebugInfoOption"]] = None,
jar_linked_service: Optional["_models.LinkedServiceReference"] = None,
jar_libs: Optional[List[JSON]] = None,
defines: Optional[Dict[str, JSON]] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword linked_service_name: Linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword policy: Activity policy.
:paramtype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:keyword storage_linked_services: Storage linked service references.
:paramtype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:keyword arguments: User specified arguments to HDInsightActivity.
:paramtype arguments: list[JSON]
:keyword get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:paramtype get_debug_info: str or
~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:keyword class_name: Class name. Type: string (or Expression with resultType string). Required.
:paramtype class_name: JSON
:keyword jar_file_path: Jar path. Type: string (or Expression with resultType string).
Required.
:paramtype jar_file_path: JSON
:keyword jar_linked_service: Jar linked service reference.
:paramtype jar_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword jar_libs: Jar libs.
:paramtype jar_libs: list[JSON]
:keyword defines: Allows user to specify defines for the MapReduce job request.
:paramtype defines: dict[str, JSON]
"""
super().__init__(
additional_properties=additional_properties,
name=name,
description=description,
state=state,
on_inactive_mark_as=on_inactive_mark_as,
depends_on=depends_on,
user_properties=user_properties,
linked_service_name=linked_service_name,
policy=policy,
**kwargs
)
self.type: str = "HDInsightMapReduce"
self.storage_linked_services = storage_linked_services
self.arguments = arguments
self.get_debug_info = get_debug_info
self.class_name = class_name
self.jar_file_path = jar_file_path
self.jar_linked_service = jar_linked_service
self.jar_libs = jar_libs
self.defines = defines
|
class HDInsightMapReduceActivity(ExecutionActivity):
'''HDInsight MapReduce activity type.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar linked_service_name: Linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar policy: Activity policy.
:vartype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:ivar storage_linked_services: Storage linked service references.
:vartype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:ivar arguments: User specified arguments to HDInsightActivity.
:vartype arguments: list[JSON]
:ivar get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:vartype get_debug_info: str or ~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:ivar class_name: Class name. Type: string (or Expression with resultType string). Required.
:vartype class_name: JSON
:ivar jar_file_path: Jar path. Type: string (or Expression with resultType string). Required.
:vartype jar_file_path: JSON
:ivar jar_linked_service: Jar linked service reference.
:vartype jar_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar jar_libs: Jar libs.
:vartype jar_libs: list[JSON]
:ivar defines: Allows user to specify defines for the MapReduce job request.
:vartype defines: dict[str, JSON]
'''
def __init__(
self,
*,
name: str,
class_name: JSON,
jar_file_path: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
policy: Optional["_models.ActivityPolicy"] = None,
storage_linked_services: Optional[List["_models.LinkedServiceReference"]] = None,
arguments: Optional[List[JSON]] = None,
get_debug_info: Optional[Union[str, "_models.HDInsightActivityDebugInfoOption"]] = None,
jar_linked_service: Optional["_models.LinkedServiceReference"] = None,
jar_libs: Optional[List[JSON]] = None,
defines: Optional[Dict[str, JSON]] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword linked_service_name: Linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword policy: Activity policy.
:paramtype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:keyword storage_linked_services: Storage linked service references.
:paramtype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:keyword arguments: User specified arguments to HDInsightActivity.
:paramtype arguments: list[JSON]
:keyword get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:paramtype get_debug_info: str or
~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:keyword class_name: Class name. Type: string (or Expression with resultType string). Required.
:paramtype class_name: JSON
:keyword jar_file_path: Jar path. Type: string (or Expression with resultType string).
Required.
:paramtype jar_file_path: JSON
:keyword jar_linked_service: Jar linked service reference.
:paramtype jar_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword jar_libs: Jar libs.
:paramtype jar_libs: list[JSON]
:keyword defines: Allows user to specify defines for the MapReduce job request.
:paramtype defines: dict[str, JSON]
'''
pass
| 2 | 2 | 85 | 0 | 43 | 42 | 1 | 1.21 | 1 | 3 | 0 | 0 | 1 | 9 | 1 | 18 | 160 | 5 | 70 | 34 | 47 | 85 | 14 | 13 | 12 | 1 | 4 | 0 | 1 |
10,885 | Azure/azure-cli-extensions | src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py | azext_datafactory.vendored_sdks.datafactory.models._models_py3.HDInsightOnDemandLinkedService |
class HDInsightOnDemandLinkedService(LinkedService):
"""HDInsight ondemand linked service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar cluster_size: Number of worker/data nodes in the cluster. Suggestion value: 4. Type: int
(or Expression with resultType int). Required.
:vartype cluster_size: JSON
:ivar time_to_live: The allowed idle time for the on-demand HDInsight cluster. Specifies how
long the on-demand HDInsight cluster stays alive after completion of an activity run if there
are no other active jobs in the cluster. The minimum value is 5 mins. Type: string (or
Expression with resultType string). Required.
:vartype time_to_live: JSON
:ivar version_type_properties_version: Version of the HDInsight cluster. Type: string (or
Expression with resultType string). Required.
:vartype version_type_properties_version: JSON
:ivar linked_service_name: Azure Storage linked service to be used by the on-demand cluster for
storing and processing data. Required.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar host_subscription_id: The customer’s subscription to host the cluster. Type: string (or
Expression with resultType string). Required.
:vartype host_subscription_id: JSON
:ivar service_principal_id: The service principal id for the hostSubscriptionId. Type: string
(or Expression with resultType string).
:vartype service_principal_id: JSON
:ivar service_principal_key: The key for the service principal id.
:vartype service_principal_key: ~azure.mgmt.datafactory.models.SecretBase
:ivar tenant: The Tenant id/name to which the service principal belongs. Type: string (or
Expression with resultType string). Required.
:vartype tenant: JSON
:ivar cluster_resource_group: The resource group where the cluster belongs. Type: string (or
Expression with resultType string). Required.
:vartype cluster_resource_group: JSON
:ivar cluster_name_prefix: The prefix of the cluster name; a timestamp postfix keeps the name
distinct. Type: string (or Expression with resultType string).
:vartype cluster_name_prefix: JSON
:ivar cluster_user_name: The username to access the cluster. Type: string (or Expression with
resultType string).
:vartype cluster_user_name: JSON
:ivar cluster_password: The password to access the cluster.
:vartype cluster_password: ~azure.mgmt.datafactory.models.SecretBase
:ivar cluster_ssh_user_name: The username used to remotely connect to the cluster’s node via SSH
(for Linux). Type: string (or Expression with resultType string).
:vartype cluster_ssh_user_name: JSON
:ivar cluster_ssh_password: The password used to remotely connect to the cluster’s node via SSH
(for Linux).
:vartype cluster_ssh_password: ~azure.mgmt.datafactory.models.SecretBase
:ivar additional_linked_service_names: Specifies additional storage accounts for the HDInsight
linked service so that the Data Factory service can register them on your behalf.
:vartype additional_linked_service_names:
list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:ivar hcatalog_linked_service_name: The name of the Azure SQL linked service that points to the
HCatalog database. The on-demand HDInsight cluster is created by using the Azure SQL database
as the metastore.
:vartype hcatalog_linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar cluster_type: The cluster type. Type: string (or Expression with resultType string).
:vartype cluster_type: JSON
:ivar spark_version: The version of spark if the cluster type is 'spark'. Type: string (or
Expression with resultType string).
:vartype spark_version: JSON
:ivar core_configuration: Specifies the core configuration parameters (as in core-site.xml) for
the HDInsight cluster to be created.
:vartype core_configuration: JSON
:ivar h_base_configuration: Specifies the HBase configuration parameters (hbase-site.xml) for
the HDInsight cluster.
:vartype h_base_configuration: JSON
:ivar hdfs_configuration: Specifies the HDFS configuration parameters (hdfs-site.xml) for the
HDInsight cluster.
:vartype hdfs_configuration: JSON
:ivar hive_configuration: Specifies the hive configuration parameters (hive-site.xml) for the
HDInsight cluster.
:vartype hive_configuration: JSON
:ivar map_reduce_configuration: Specifies the MapReduce configuration parameters
(mapred-site.xml) for the HDInsight cluster.
:vartype map_reduce_configuration: JSON
:ivar oozie_configuration: Specifies the Oozie configuration parameters (oozie-site.xml) for
the HDInsight cluster.
:vartype oozie_configuration: JSON
:ivar storm_configuration: Specifies the Storm configuration parameters (storm-site.xml) for
the HDInsight cluster.
:vartype storm_configuration: JSON
:ivar yarn_configuration: Specifies the Yarn configuration parameters (yarn-site.xml) for the
HDInsight cluster.
:vartype yarn_configuration: JSON
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
:ivar head_node_size: Specifies the size of the head node for the HDInsight cluster.
:vartype head_node_size: JSON
:ivar data_node_size: Specifies the size of the data node for the HDInsight cluster.
:vartype data_node_size: JSON
:ivar zookeeper_node_size: Specifies the size of the ZooKeeper node for the HDInsight cluster.
:vartype zookeeper_node_size: JSON
:ivar script_actions: Custom script actions to run on the HDI on-demand cluster once it's up. Please
refer to
https://docs.microsoft.com/en-us/azure/hdinsight/hdinsight-hadoop-customize-cluster-linux?toc=%2Fen-us%2Fazure%2Fhdinsight%2Fr-server%2FTOC.json&bc=%2Fen-us%2Fazure%2Fbread%2Ftoc.json#understanding-script-actions. # pylint: disable=line-too-long
:vartype script_actions: list[~azure.mgmt.datafactory.models.ScriptAction]
:ivar virtual_network_id: The ARM resource ID for the vNet to which the cluster should be
joined after creation. Type: string (or Expression with resultType string).
:vartype virtual_network_id: JSON
:ivar subnet_name: The ARM resource ID for the subnet in the vNet. If virtualNetworkId was
specified, then this property is required. Type: string (or Expression with resultType string).
:vartype subnet_name: JSON
:ivar credential: The credential reference containing authentication information.
:vartype credential: ~azure.mgmt.datafactory.models.CredentialReference
"""
_validation = {
"type": {"required": True},
"cluster_size": {"required": True},
"time_to_live": {"required": True},
"version_type_properties_version": {"required": True},
"linked_service_name": {"required": True},
"host_subscription_id": {"required": True},
"tenant": {"required": True},
"cluster_resource_group": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"version": {"key": "version", "type": "str"},
"connect_via": {"key": "connectVia", "type": "IntegrationRuntimeReference"},
"description": {"key": "description", "type": "str"},
"parameters": {"key": "parameters", "type": "{ParameterSpecification}"},
"annotations": {"key": "annotations", "type": "[object]"},
"cluster_size": {"key": "typeProperties.clusterSize", "type": "object"},
"time_to_live": {"key": "typeProperties.timeToLive", "type": "object"},
"version_type_properties_version": {"key": "typeProperties.version", "type": "object"},
"linked_service_name": {"key": "typeProperties.linkedServiceName", "type": "LinkedServiceReference"},
"host_subscription_id": {"key": "typeProperties.hostSubscriptionId", "type": "object"},
"service_principal_id": {"key": "typeProperties.servicePrincipalId", "type": "object"},
"service_principal_key": {"key": "typeProperties.servicePrincipalKey", "type": "SecretBase"},
"tenant": {"key": "typeProperties.tenant", "type": "object"},
"cluster_resource_group": {"key": "typeProperties.clusterResourceGroup", "type": "object"},
"cluster_name_prefix": {"key": "typeProperties.clusterNamePrefix", "type": "object"},
"cluster_user_name": {"key": "typeProperties.clusterUserName", "type": "object"},
"cluster_password": {"key": "typeProperties.clusterPassword", "type": "SecretBase"},
"cluster_ssh_user_name": {"key": "typeProperties.clusterSshUserName", "type": "object"},
"cluster_ssh_password": {"key": "typeProperties.clusterSshPassword", "type": "SecretBase"},
"additional_linked_service_names": {
"key": "typeProperties.additionalLinkedServiceNames",
"type": "[LinkedServiceReference]",
},
"hcatalog_linked_service_name": {
"key": "typeProperties.hcatalogLinkedServiceName",
"type": "LinkedServiceReference",
},
"cluster_type": {"key": "typeProperties.clusterType", "type": "object"},
"spark_version": {"key": "typeProperties.sparkVersion", "type": "object"},
"core_configuration": {"key": "typeProperties.coreConfiguration", "type": "object"},
"h_base_configuration": {"key": "typeProperties.hBaseConfiguration", "type": "object"},
"hdfs_configuration": {"key": "typeProperties.hdfsConfiguration", "type": "object"},
"hive_configuration": {"key": "typeProperties.hiveConfiguration", "type": "object"},
"map_reduce_configuration": {"key": "typeProperties.mapReduceConfiguration", "type": "object"},
"oozie_configuration": {"key": "typeProperties.oozieConfiguration", "type": "object"},
"storm_configuration": {"key": "typeProperties.stormConfiguration", "type": "object"},
"yarn_configuration": {"key": "typeProperties.yarnConfiguration", "type": "object"},
"encrypted_credential": {"key": "typeProperties.encryptedCredential", "type": "str"},
"head_node_size": {"key": "typeProperties.headNodeSize", "type": "object"},
"data_node_size": {"key": "typeProperties.dataNodeSize", "type": "object"},
"zookeeper_node_size": {"key": "typeProperties.zookeeperNodeSize", "type": "object"},
"script_actions": {"key": "typeProperties.scriptActions", "type": "[ScriptAction]"},
"virtual_network_id": {"key": "typeProperties.virtualNetworkId", "type": "object"},
"subnet_name": {"key": "typeProperties.subnetName", "type": "object"},
"credential": {"key": "typeProperties.credential", "type": "CredentialReference"},
}
def __init__( # pylint: disable=too-many-locals
self,
*,
cluster_size: JSON,
time_to_live: JSON,
version_type_properties_version: JSON,
linked_service_name: "_models.LinkedServiceReference",
host_subscription_id: JSON,
tenant: JSON,
cluster_resource_group: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
service_principal_id: Optional[JSON] = None,
service_principal_key: Optional["_models.SecretBase"] = None,
cluster_name_prefix: Optional[JSON] = None,
cluster_user_name: Optional[JSON] = None,
cluster_password: Optional["_models.SecretBase"] = None,
cluster_ssh_user_name: Optional[JSON] = None,
cluster_ssh_password: Optional["_models.SecretBase"] = None,
additional_linked_service_names: Optional[List["_models.LinkedServiceReference"]] = None,
hcatalog_linked_service_name: Optional["_models.LinkedServiceReference"] = None,
cluster_type: Optional[JSON] = None,
spark_version: Optional[JSON] = None,
core_configuration: Optional[JSON] = None,
h_base_configuration: Optional[JSON] = None,
hdfs_configuration: Optional[JSON] = None,
hive_configuration: Optional[JSON] = None,
map_reduce_configuration: Optional[JSON] = None,
oozie_configuration: Optional[JSON] = None,
storm_configuration: Optional[JSON] = None,
yarn_configuration: Optional[JSON] = None,
encrypted_credential: Optional[str] = None,
head_node_size: Optional[JSON] = None,
data_node_size: Optional[JSON] = None,
zookeeper_node_size: Optional[JSON] = None,
script_actions: Optional[List["_models.ScriptAction"]] = None,
virtual_network_id: Optional[JSON] = None,
subnet_name: Optional[JSON] = None,
credential: Optional["_models.CredentialReference"] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword cluster_size: Number of worker/data nodes in the cluster. Suggestion value: 4. Type:
int (or Expression with resultType int). Required.
:paramtype cluster_size: JSON
:keyword time_to_live: The allowed idle time for the on-demand HDInsight cluster. Specifies how
long the on-demand HDInsight cluster stays alive after completion of an activity run if there
are no other active jobs in the cluster. The minimum value is 5 mins. Type: string (or
Expression with resultType string). Required.
:paramtype time_to_live: JSON
:keyword version_type_properties_version: Version of the HDInsight cluster. Type: string (or
Expression with resultType string). Required.
:paramtype version_type_properties_version: JSON
:keyword linked_service_name: Azure Storage linked service to be used by the on-demand cluster
for storing and processing data. Required.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword host_subscription_id: The customer’s subscription to host the cluster. Type: string
(or Expression with resultType string). Required.
:paramtype host_subscription_id: JSON
:keyword service_principal_id: The service principal id for the hostSubscriptionId. Type:
string (or Expression with resultType string).
:paramtype service_principal_id: JSON
:keyword service_principal_key: The key for the service principal id.
:paramtype service_principal_key: ~azure.mgmt.datafactory.models.SecretBase
:keyword tenant: The Tenant id/name to which the service principal belongs. Type: string (or
Expression with resultType string). Required.
:paramtype tenant: JSON
:keyword cluster_resource_group: The resource group where the cluster belongs. Type: string (or
Expression with resultType string). Required.
:paramtype cluster_resource_group: JSON
:keyword cluster_name_prefix: The prefix of the cluster name; a timestamp postfix keeps the
name distinct. Type: string (or Expression with resultType string).
:paramtype cluster_name_prefix: JSON
:keyword cluster_user_name: The username to access the cluster. Type: string (or Expression
with resultType string).
:paramtype cluster_user_name: JSON
:keyword cluster_password: The password to access the cluster.
:paramtype cluster_password: ~azure.mgmt.datafactory.models.SecretBase
:keyword cluster_ssh_user_name: The username used to remotely connect to the cluster’s node via
SSH (for Linux). Type: string (or Expression with resultType string).
:paramtype cluster_ssh_user_name: JSON
:keyword cluster_ssh_password: The password used to remotely connect to the cluster’s node via
SSH (for Linux).
:paramtype cluster_ssh_password: ~azure.mgmt.datafactory.models.SecretBase
:keyword additional_linked_service_names: Specifies additional storage accounts for the
HDInsight linked service so that the Data Factory service can register them on your behalf.
:paramtype additional_linked_service_names:
list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:keyword hcatalog_linked_service_name: The name of the Azure SQL linked service that points to the
HCatalog database. The on-demand HDInsight cluster is created by using the Azure SQL database
as the metastore.
:paramtype hcatalog_linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword cluster_type: The cluster type. Type: string (or Expression with resultType string).
:paramtype cluster_type: JSON
:keyword spark_version: The version of spark if the cluster type is 'spark'. Type: string (or
Expression with resultType string).
:paramtype spark_version: JSON
:keyword core_configuration: Specifies the core configuration parameters (as in core-site.xml)
for the HDInsight cluster to be created.
:paramtype core_configuration: JSON
:keyword h_base_configuration: Specifies the HBase configuration parameters (hbase-site.xml)
for the HDInsight cluster.
:paramtype h_base_configuration: JSON
:keyword hdfs_configuration: Specifies the HDFS configuration parameters (hdfs-site.xml) for
the HDInsight cluster.
:paramtype hdfs_configuration: JSON
:keyword hive_configuration: Specifies the hive configuration parameters (hive-site.xml) for
the HDInsight cluster.
:paramtype hive_configuration: JSON
:keyword map_reduce_configuration: Specifies the MapReduce configuration parameters
(mapred-site.xml) for the HDInsight cluster.
:paramtype map_reduce_configuration: JSON
:keyword oozie_configuration: Specifies the Oozie configuration parameters (oozie-site.xml) for
the HDInsight cluster.
:paramtype oozie_configuration: JSON
:keyword storm_configuration: Specifies the Storm configuration parameters (storm-site.xml) for
the HDInsight cluster.
:paramtype storm_configuration: JSON
:keyword yarn_configuration: Specifies the Yarn configuration parameters (yarn-site.xml) for
the HDInsight cluster.
:paramtype yarn_configuration: JSON
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
:keyword head_node_size: Specifies the size of the head node for the HDInsight cluster.
:paramtype head_node_size: JSON
:keyword data_node_size: Specifies the size of the data node for the HDInsight cluster.
:paramtype data_node_size: JSON
:keyword zookeeper_node_size: Specifies the size of the ZooKeeper node for the HDInsight
cluster.
:paramtype zookeeper_node_size: JSON
:keyword script_actions: Custom script actions to run on the HDI on-demand cluster once it's up.
Please refer to
https://docs.microsoft.com/en-us/azure/hdinsight/hdinsight-hadoop-customize-cluster-linux?toc=%2Fen-us%2Fazure%2Fhdinsight%2Fr-server%2FTOC.json&bc=%2Fen-us%2Fazure%2Fbread%2Ftoc.json#understanding-script-actions. # pylint: disable=line-too-long
:paramtype script_actions: list[~azure.mgmt.datafactory.models.ScriptAction]
:keyword virtual_network_id: The ARM resource ID for the vNet to which the cluster should be
joined after creation. Type: string (or Expression with resultType string).
:paramtype virtual_network_id: JSON
:keyword subnet_name: The ARM resource ID for the subnet in the vNet. If virtualNetworkId was
specified, then this property is required. Type: string (or Expression with resultType string).
:paramtype subnet_name: JSON
:keyword credential: The credential reference containing authentication information.
:paramtype credential: ~azure.mgmt.datafactory.models.CredentialReference
"""
super().__init__(
additional_properties=additional_properties,
version=version,
connect_via=connect_via,
description=description,
parameters=parameters,
annotations=annotations,
**kwargs
)
self.type: str = "HDInsightOnDemand"
self.cluster_size = cluster_size
self.time_to_live = time_to_live
self.version_type_properties_version = version_type_properties_version
self.linked_service_name = linked_service_name
self.host_subscription_id = host_subscription_id
self.service_principal_id = service_principal_id
self.service_principal_key = service_principal_key
self.tenant = tenant
self.cluster_resource_group = cluster_resource_group
self.cluster_name_prefix = cluster_name_prefix
self.cluster_user_name = cluster_user_name
self.cluster_password = cluster_password
self.cluster_ssh_user_name = cluster_ssh_user_name
self.cluster_ssh_password = cluster_ssh_password
self.additional_linked_service_names = additional_linked_service_names
self.hcatalog_linked_service_name = hcatalog_linked_service_name
self.cluster_type = cluster_type
self.spark_version = spark_version
self.core_configuration = core_configuration
self.h_base_configuration = h_base_configuration
self.hdfs_configuration = hdfs_configuration
self.hive_configuration = hive_configuration
self.map_reduce_configuration = map_reduce_configuration
self.oozie_configuration = oozie_configuration
self.storm_configuration = storm_configuration
self.yarn_configuration = yarn_configuration
self.encrypted_credential = encrypted_credential
self.head_node_size = head_node_size
self.data_node_size = data_node_size
self.zookeeper_node_size = zookeeper_node_size
self.script_actions = script_actions
self.virtual_network_id = virtual_network_id
self.subnet_name = subnet_name
self.credential = credential
|
class HDInsightOnDemandLinkedService(LinkedService):
'''HDInsight on-demand linked service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar cluster_size: Number of worker/data nodes in the cluster. Suggestion value: 4. Type: int
(or Expression with resultType int). Required.
:vartype cluster_size: JSON
:ivar time_to_live: The allowed idle time for the on-demand HDInsight cluster. Specifies how
long the on-demand HDInsight cluster stays alive after completion of an activity run if there
are no other active jobs in the cluster. The minimum value is 5 mins. Type: string (or
Expression with resultType string). Required.
:vartype time_to_live: JSON
:ivar version_type_properties_version: Version of the HDInsight cluster. Type: string (or
Expression with resultType string). Required.
:vartype version_type_properties_version: JSON
:ivar linked_service_name: Azure Storage linked service to be used by the on-demand cluster for
storing and processing data. Required.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar host_subscription_id: The customer’s subscription to host the cluster. Type: string (or
Expression with resultType string). Required.
:vartype host_subscription_id: JSON
:ivar service_principal_id: The service principal id for the hostSubscriptionId. Type: string
(or Expression with resultType string).
:vartype service_principal_id: JSON
:ivar service_principal_key: The key for the service principal id.
:vartype service_principal_key: ~azure.mgmt.datafactory.models.SecretBase
:ivar tenant: The Tenant id/name to which the service principal belongs. Type: string (or
Expression with resultType string). Required.
:vartype tenant: JSON
:ivar cluster_resource_group: The resource group where the cluster belongs. Type: string (or
Expression with resultType string). Required.
:vartype cluster_resource_group: JSON
:ivar cluster_name_prefix: The prefix of the cluster name; a timestamp postfix keeps the name
distinct. Type: string (or Expression with resultType string).
:vartype cluster_name_prefix: JSON
:ivar cluster_user_name: The username to access the cluster. Type: string (or Expression with
resultType string).
:vartype cluster_user_name: JSON
:ivar cluster_password: The password to access the cluster.
:vartype cluster_password: ~azure.mgmt.datafactory.models.SecretBase
:ivar cluster_ssh_user_name: The username used to remotely connect to the cluster’s node via SSH
(for Linux). Type: string (or Expression with resultType string).
:vartype cluster_ssh_user_name: JSON
:ivar cluster_ssh_password: The password used to remotely connect to the cluster’s node via SSH
(for Linux).
:vartype cluster_ssh_password: ~azure.mgmt.datafactory.models.SecretBase
:ivar additional_linked_service_names: Specifies additional storage accounts for the HDInsight
linked service so that the Data Factory service can register them on your behalf.
:vartype additional_linked_service_names:
list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:ivar hcatalog_linked_service_name: The name of the Azure SQL linked service that points to the
HCatalog database. The on-demand HDInsight cluster is created by using the Azure SQL database
as the metastore.
:vartype hcatalog_linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar cluster_type: The cluster type. Type: string (or Expression with resultType string).
:vartype cluster_type: JSON
:ivar spark_version: The version of spark if the cluster type is 'spark'. Type: string (or
Expression with resultType string).
:vartype spark_version: JSON
:ivar core_configuration: Specifies the core configuration parameters (as in core-site.xml) for
the HDInsight cluster to be created.
:vartype core_configuration: JSON
:ivar h_base_configuration: Specifies the HBase configuration parameters (hbase-site.xml) for
the HDInsight cluster.
:vartype h_base_configuration: JSON
:ivar hdfs_configuration: Specifies the HDFS configuration parameters (hdfs-site.xml) for the
HDInsight cluster.
:vartype hdfs_configuration: JSON
:ivar hive_configuration: Specifies the hive configuration parameters (hive-site.xml) for the
HDInsight cluster.
:vartype hive_configuration: JSON
:ivar map_reduce_configuration: Specifies the MapReduce configuration parameters
(mapred-site.xml) for the HDInsight cluster.
:vartype map_reduce_configuration: JSON
:ivar oozie_configuration: Specifies the Oozie configuration parameters (oozie-site.xml) for
the HDInsight cluster.
:vartype oozie_configuration: JSON
:ivar storm_configuration: Specifies the Storm configuration parameters (storm-site.xml) for
the HDInsight cluster.
:vartype storm_configuration: JSON
:ivar yarn_configuration: Specifies the Yarn configuration parameters (yarn-site.xml) for the
HDInsight cluster.
:vartype yarn_configuration: JSON
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
:ivar head_node_size: Specifies the size of the head node for the HDInsight cluster.
:vartype head_node_size: JSON
:ivar data_node_size: Specifies the size of the data node for the HDInsight cluster.
:vartype data_node_size: JSON
:ivar zookeeper_node_size: Specifies the size of the ZooKeeper node for the HDInsight cluster.
:vartype zookeeper_node_size: JSON
:ivar script_actions: Custom script actions to run on the HDI on-demand cluster once it's up. Please
refer to
https://docs.microsoft.com/en-us/azure/hdinsight/hdinsight-hadoop-customize-cluster-linux?toc=%2Fen-us%2Fazure%2Fhdinsight%2Fr-server%2FTOC.json&bc=%2Fen-us%2Fazure%2Fbread%2Ftoc.json#understanding-script-actions. # pylint: disable=line-too-long
:vartype script_actions: list[~azure.mgmt.datafactory.models.ScriptAction]
:ivar virtual_network_id: The ARM resource ID for the vNet to which the cluster should be
joined after creation. Type: string (or Expression with resultType string).
:vartype virtual_network_id: JSON
:ivar subnet_name: The ARM resource ID for the subnet in the vNet. If virtualNetworkId was
specified, then this property is required. Type: string (or Expression with resultType string).
:vartype subnet_name: JSON
:ivar credential: The credential reference containing authentication information.
:vartype credential: ~azure.mgmt.datafactory.models.CredentialReference
'''
def __init__( # pylint: disable=too-many-locals
self,
*,
cluster_size: JSON,
time_to_live: JSON,
version_type_properties_version: JSON,
linked_service_name: "_models.LinkedServiceReference",
host_subscription_id: JSON,
tenant: JSON,
cluster_resource_group: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
service_principal_id: Optional[JSON] = None,
service_principal_key: Optional["_models.SecretBase"] = None,
cluster_name_prefix: Optional[JSON] = None,
cluster_user_name: Optional[JSON] = None,
cluster_password: Optional["_models.SecretBase"] = None,
cluster_ssh_user_name: Optional[JSON] = None,
cluster_ssh_password: Optional["_models.SecretBase"] = None,
additional_linked_service_names: Optional[List["_models.LinkedServiceReference"]] = None,
hcatalog_linked_service_name: Optional["_models.LinkedServiceReference"] = None,
cluster_type: Optional[JSON] = None,
spark_version: Optional[JSON] = None,
core_configuration: Optional[JSON] = None,
h_base_configuration: Optional[JSON] = None,
hdfs_configuration: Optional[JSON] = None,
hive_configuration: Optional[JSON] = None,
map_reduce_configuration: Optional[JSON] = None,
oozie_configuration: Optional[JSON] = None,
storm_configuration: Optional[JSON] = None,
yarn_configuration: Optional[JSON] = None,
encrypted_credential: Optional[str] = None,
head_node_size: Optional[JSON] = None,
data_node_size: Optional[JSON] = None,
zookeeper_node_size: Optional[JSON] = None,
script_actions: Optional[List["_models.ScriptAction"]] = None,
virtual_network_id: Optional[JSON] = None,
subnet_name: Optional[JSON] = None,
credential: Optional["_models.CredentialReference"] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword cluster_size: Number of worker/data nodes in the cluster. Suggestion value: 4. Type:
int (or Expression with resultType int). Required.
:paramtype cluster_size: JSON
:keyword time_to_live: The allowed idle time for the on-demand HDInsight cluster. Specifies how
long the on-demand HDInsight cluster stays alive after completion of an activity run if there
are no other active jobs in the cluster. The minimum value is 5 mins. Type: string (or
Expression with resultType string). Required.
:paramtype time_to_live: JSON
:keyword version_type_properties_version: Version of the HDInsight cluster. Type: string (or
Expression with resultType string). Required.
:paramtype version_type_properties_version: JSON
:keyword linked_service_name: Azure Storage linked service to be used by the on-demand cluster
for storing and processing data. Required.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword host_subscription_id: The customer’s subscription to host the cluster. Type: string
(or Expression with resultType string). Required.
:paramtype host_subscription_id: JSON
:keyword service_principal_id: The service principal id for the hostSubscriptionId. Type:
string (or Expression with resultType string).
:paramtype service_principal_id: JSON
:keyword service_principal_key: The key for the service principal id.
:paramtype service_principal_key: ~azure.mgmt.datafactory.models.SecretBase
:keyword tenant: The Tenant id/name to which the service principal belongs. Type: string (or
Expression with resultType string). Required.
:paramtype tenant: JSON
:keyword cluster_resource_group: The resource group where the cluster belongs. Type: string (or
Expression with resultType string). Required.
:paramtype cluster_resource_group: JSON
:keyword cluster_name_prefix: The prefix of the cluster name; a timestamp postfix keeps the
name distinct. Type: string (or Expression with resultType string).
:paramtype cluster_name_prefix: JSON
:keyword cluster_user_name: The username to access the cluster. Type: string (or Expression
with resultType string).
:paramtype cluster_user_name: JSON
:keyword cluster_password: The password to access the cluster.
:paramtype cluster_password: ~azure.mgmt.datafactory.models.SecretBase
:keyword cluster_ssh_user_name: The username used to remotely connect to the cluster’s node via
SSH (for Linux). Type: string (or Expression with resultType string).
:paramtype cluster_ssh_user_name: JSON
:keyword cluster_ssh_password: The password used to remotely connect to the cluster’s node via
SSH (for Linux).
:paramtype cluster_ssh_password: ~azure.mgmt.datafactory.models.SecretBase
:keyword additional_linked_service_names: Specifies additional storage accounts for the
HDInsight linked service so that the Data Factory service can register them on your behalf.
:paramtype additional_linked_service_names:
list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:keyword hcatalog_linked_service_name: The name of the Azure SQL linked service that points to the
HCatalog database. The on-demand HDInsight cluster is created by using the Azure SQL database
as the metastore.
:paramtype hcatalog_linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword cluster_type: The cluster type. Type: string (or Expression with resultType string).
:paramtype cluster_type: JSON
:keyword spark_version: The version of spark if the cluster type is 'spark'. Type: string (or
Expression with resultType string).
:paramtype spark_version: JSON
:keyword core_configuration: Specifies the core configuration parameters (as in core-site.xml)
for the HDInsight cluster to be created.
:paramtype core_configuration: JSON
:keyword h_base_configuration: Specifies the HBase configuration parameters (hbase-site.xml)
for the HDInsight cluster.
:paramtype h_base_configuration: JSON
:keyword hdfs_configuration: Specifies the HDFS configuration parameters (hdfs-site.xml) for
the HDInsight cluster.
:paramtype hdfs_configuration: JSON
:keyword hive_configuration: Specifies the hive configuration parameters (hive-site.xml) for
the HDInsight cluster.
:paramtype hive_configuration: JSON
:keyword map_reduce_configuration: Specifies the MapReduce configuration parameters
(mapred-site.xml) for the HDInsight cluster.
:paramtype map_reduce_configuration: JSON
:keyword oozie_configuration: Specifies the Oozie configuration parameters (oozie-site.xml) for
the HDInsight cluster.
:paramtype oozie_configuration: JSON
:keyword storm_configuration: Specifies the Storm configuration parameters (storm-site.xml) for
the HDInsight cluster.
:paramtype storm_configuration: JSON
:keyword yarn_configuration: Specifies the Yarn configuration parameters (yarn-site.xml) for
the HDInsight cluster.
:paramtype yarn_configuration: JSON
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
:keyword head_node_size: Specifies the size of the head node for the HDInsight cluster.
:paramtype head_node_size: JSON
:keyword data_node_size: Specifies the size of the data node for the HDInsight cluster.
:paramtype data_node_size: JSON
:keyword zookeeper_node_size: Specifies the size of the ZooKeeper node for the HDInsight
cluster.
:paramtype zookeeper_node_size: JSON
:keyword script_actions: Custom script actions to run on the HDI on-demand cluster once it's up.
Please refer to
https://docs.microsoft.com/en-us/azure/hdinsight/hdinsight-hadoop-customize-cluster-linux?toc=%2Fen-us%2Fazure%2Fhdinsight%2Fr-server%2FTOC.json&bc=%2Fen-us%2Fazure%2Fbread%2Ftoc.json#understanding-script-actions. # pylint: disable=line-too-long
:paramtype script_actions: list[~azure.mgmt.datafactory.models.ScriptAction]
:keyword virtual_network_id: The ARM resource ID for the vNet to which the cluster should be
joined after creation. Type: string (or Expression with resultType string).
:paramtype virtual_network_id: JSON
:keyword subnet_name: The ARM resource ID for the subnet in the vNet. If virtualNetworkId was
specified, then this property is required. Type: string (or Expression with resultType string).
:paramtype subnet_name: JSON
:keyword credential: The credential reference containing authentication information.
:paramtype credential: ~azure.mgmt.datafactory.models.CredentialReference
'''
pass
| 2 | 2 | 204 | 0 | 89 | 116 | 1 | 1.56 | 1 | 3 | 0 | 0 | 1 | 35 | 1 | 17 | 386 | 5 | 149 | 83 | 103 | 233 | 40 | 39 | 38 | 1 | 3 | 0 | 1 |
10,886 | Azure/azure-cli-extensions | src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py | azext_datafactory.vendored_sdks.datafactory.models._models_py3.HDInsightPigActivity |
class HDInsightPigActivity(ExecutionActivity):
"""HDInsight Pig activity type.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar linked_service_name: Linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar policy: Activity policy.
:vartype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:ivar storage_linked_services: Storage linked service references.
:vartype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:ivar arguments: User specified arguments to HDInsightActivity. Type: array (or Expression with
resultType array).
:vartype arguments: JSON
:ivar get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:vartype get_debug_info: str or ~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:ivar script_path: Script path. Type: string (or Expression with resultType string).
:vartype script_path: JSON
:ivar script_linked_service: Script linked service reference.
:vartype script_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar defines: Allows user to specify defines for Pig job request.
:vartype defines: dict[str, JSON]
"""
_validation = {
"name": {"required": True},
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"state": {"key": "state", "type": "str"},
"on_inactive_mark_as": {"key": "onInactiveMarkAs", "type": "str"},
"depends_on": {"key": "dependsOn", "type": "[ActivityDependency]"},
"user_properties": {"key": "userProperties", "type": "[UserProperty]"},
"linked_service_name": {"key": "linkedServiceName", "type": "LinkedServiceReference"},
"policy": {"key": "policy", "type": "ActivityPolicy"},
"storage_linked_services": {"key": "typeProperties.storageLinkedServices", "type": "[LinkedServiceReference]"},
"arguments": {"key": "typeProperties.arguments", "type": "object"},
"get_debug_info": {"key": "typeProperties.getDebugInfo", "type": "str"},
"script_path": {"key": "typeProperties.scriptPath", "type": "object"},
"script_linked_service": {"key": "typeProperties.scriptLinkedService", "type": "LinkedServiceReference"},
"defines": {"key": "typeProperties.defines", "type": "{object}"},
}
def __init__(
self,
*,
name: str,
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
policy: Optional["_models.ActivityPolicy"] = None,
storage_linked_services: Optional[List["_models.LinkedServiceReference"]] = None,
arguments: Optional[JSON] = None,
get_debug_info: Optional[Union[str, "_models.HDInsightActivityDebugInfoOption"]] = None,
script_path: Optional[JSON] = None,
script_linked_service: Optional["_models.LinkedServiceReference"] = None,
defines: Optional[Dict[str, JSON]] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword linked_service_name: Linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword policy: Activity policy.
:paramtype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:keyword storage_linked_services: Storage linked service references.
:paramtype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:keyword arguments: User specified arguments to HDInsightActivity. Type: array (or Expression
with resultType array).
:paramtype arguments: JSON
:keyword get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:paramtype get_debug_info: str or
~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:keyword script_path: Script path. Type: string (or Expression with resultType string).
:paramtype script_path: JSON
:keyword script_linked_service: Script linked service reference.
:paramtype script_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword defines: Allows user to specify defines for Pig job request.
:paramtype defines: dict[str, JSON]
"""
super().__init__(
additional_properties=additional_properties,
name=name,
description=description,
state=state,
on_inactive_mark_as=on_inactive_mark_as,
depends_on=depends_on,
user_properties=user_properties,
linked_service_name=linked_service_name,
policy=policy,
**kwargs
)
self.type: str = "HDInsightPig"
self.storage_linked_services = storage_linked_services
self.arguments = arguments
self.get_debug_info = get_debug_info
self.script_path = script_path
self.script_linked_service = script_linked_service
self.defines = defines
|
class HDInsightPigActivity(ExecutionActivity):
'''HDInsight Pig activity type.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar linked_service_name: Linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar policy: Activity policy.
:vartype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:ivar storage_linked_services: Storage linked service references.
:vartype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:ivar arguments: User specified arguments to HDInsightActivity. Type: array (or Expression with
resultType array).
:vartype arguments: JSON
:ivar get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:vartype get_debug_info: str or ~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:ivar script_path: Script path. Type: string (or Expression with resultType string).
:vartype script_path: JSON
:ivar script_linked_service: Script linked service reference.
:vartype script_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar defines: Allows user to specify defines for Pig job request.
:vartype defines: dict[str, JSON]
'''
def __init__(
self,
*,
name: str,
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
policy: Optional["_models.ActivityPolicy"] = None,
storage_linked_services: Optional[List["_models.LinkedServiceReference"]] = None,
arguments: Optional[JSON] = None,
get_debug_info: Optional[Union[str, "_models.HDInsightActivityDebugInfoOption"]] = None,
script_path: Optional[JSON] = None,
script_linked_service: Optional["_models.LinkedServiceReference"] = None,
defines: Optional[Dict[str, JSON]] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword linked_service_name: Linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword policy: Activity policy.
:paramtype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:keyword storage_linked_services: Storage linked service references.
:paramtype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:keyword arguments: User specified arguments to HDInsightActivity. Type: array (or Expression
with resultType array).
:paramtype arguments: JSON
:keyword get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:paramtype get_debug_info: str or
~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:keyword script_path: Script path. Type: string (or Expression with resultType string).
:paramtype script_path: JSON
:keyword script_linked_service: Script linked service reference.
:paramtype script_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword defines: Allows user to specify defines for Pig job request.
:paramtype defines: dict[str, JSON]
'''
pass
| 2 | 2 | 77 | 0 | 39 | 38 | 1 | 1.26 | 1 | 3 | 0 | 0 | 1 | 7 | 1 | 18 | 145 | 5 | 62 | 30 | 41 | 78 | 12 | 11 | 10 | 1 | 4 | 0 | 1 |
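Editorial note: a minimal usage sketch for the HDInsightPigActivity model in the record above, assuming the class is imported from the public azure-mgmt-datafactory package that this vendored module mirrors. The activity, script, and linked-service names are hypothetical, and the LinkedServiceReference constructor arguments are an assumption here since its definition is not part of this record.

# Sketch only: constructing an HDInsightPigActivity; all names below are hypothetical.
from azure.mgmt.datafactory.models import HDInsightPigActivity, LinkedServiceReference

pig_activity = HDInsightPigActivity(
    name="RunPigScript",  # 'name' is the only required constructor argument
    script_path="adfcontainer/scripts/transform.pig",
    script_linked_service=LinkedServiceReference(
        type="LinkedServiceReference", reference_name="StorageLinkedService"  # assumed SDK signature
    ),
    get_debug_info="Failure",  # one of the documented known values
    defines={"inputPath": "wasb:///input", "outputPath": "wasb:///output"},
)
# pig_activity.type is set to "HDInsightPig" by the constructor.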
10,887 | Azure/azure-cli-extensions | src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py | azext_datafactory.vendored_sdks.datafactory.models._models_py3.HDInsightSparkActivity |
class HDInsightSparkActivity(ExecutionActivity):
"""HDInsight Spark activity.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar linked_service_name: Linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar policy: Activity policy.
:vartype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:ivar root_path: The root path in 'sparkJobLinkedService' for all the job’s files. Type: string
(or Expression with resultType string). Required.
:vartype root_path: JSON
:ivar entry_file_path: The relative path to the root folder of the code/package to be executed.
Type: string (or Expression with resultType string). Required.
:vartype entry_file_path: JSON
:ivar arguments: The user-specified arguments to HDInsightSparkActivity.
:vartype arguments: list[JSON]
:ivar get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:vartype get_debug_info: str or ~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:ivar spark_job_linked_service: The storage linked service for uploading the entry file and
dependencies, and for receiving logs.
:vartype spark_job_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar class_name: The application's Java/Spark main class.
:vartype class_name: str
:ivar proxy_user: The user to impersonate that will execute the job. Type: string (or
Expression with resultType string).
:vartype proxy_user: JSON
:ivar spark_config: Spark configuration property.
:vartype spark_config: dict[str, JSON]
"""
_validation = {
"name": {"required": True},
"type": {"required": True},
"root_path": {"required": True},
"entry_file_path": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"state": {"key": "state", "type": "str"},
"on_inactive_mark_as": {"key": "onInactiveMarkAs", "type": "str"},
"depends_on": {"key": "dependsOn", "type": "[ActivityDependency]"},
"user_properties": {"key": "userProperties", "type": "[UserProperty]"},
"linked_service_name": {"key": "linkedServiceName", "type": "LinkedServiceReference"},
"policy": {"key": "policy", "type": "ActivityPolicy"},
"root_path": {"key": "typeProperties.rootPath", "type": "object"},
"entry_file_path": {"key": "typeProperties.entryFilePath", "type": "object"},
"arguments": {"key": "typeProperties.arguments", "type": "[object]"},
"get_debug_info": {"key": "typeProperties.getDebugInfo", "type": "str"},
"spark_job_linked_service": {"key": "typeProperties.sparkJobLinkedService", "type": "LinkedServiceReference"},
"class_name": {"key": "typeProperties.className", "type": "str"},
"proxy_user": {"key": "typeProperties.proxyUser", "type": "object"},
"spark_config": {"key": "typeProperties.sparkConfig", "type": "{object}"},
}
def __init__(
self,
*,
name: str,
root_path: JSON,
entry_file_path: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
policy: Optional["_models.ActivityPolicy"] = None,
arguments: Optional[List[JSON]] = None,
get_debug_info: Optional[Union[str, "_models.HDInsightActivityDebugInfoOption"]] = None,
spark_job_linked_service: Optional["_models.LinkedServiceReference"] = None,
class_name: Optional[str] = None,
proxy_user: Optional[JSON] = None,
spark_config: Optional[Dict[str, JSON]] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword linked_service_name: Linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword policy: Activity policy.
:paramtype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:keyword root_path: The root path in 'sparkJobLinkedService' for all the job’s files. Type:
string (or Expression with resultType string). Required.
:paramtype root_path: JSON
:keyword entry_file_path: The relative path to the root folder of the code/package to be
executed. Type: string (or Expression with resultType string). Required.
:paramtype entry_file_path: JSON
:keyword arguments: The user-specified arguments to HDInsightSparkActivity.
:paramtype arguments: list[JSON]
:keyword get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:paramtype get_debug_info: str or
~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:keyword spark_job_linked_service: The storage linked service for uploading the entry file and
dependencies, and for receiving logs.
:paramtype spark_job_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword class_name: The application's Java/Spark main class.
:paramtype class_name: str
:keyword proxy_user: The user to impersonate that will execute the job. Type: string (or
Expression with resultType string).
:paramtype proxy_user: JSON
:keyword spark_config: Spark configuration property.
:paramtype spark_config: dict[str, JSON]
"""
super().__init__(
additional_properties=additional_properties,
name=name,
description=description,
state=state,
on_inactive_mark_as=on_inactive_mark_as,
depends_on=depends_on,
user_properties=user_properties,
linked_service_name=linked_service_name,
policy=policy,
**kwargs
)
self.type: str = "HDInsightSpark"
self.root_path = root_path
self.entry_file_path = entry_file_path
self.arguments = arguments
self.get_debug_info = get_debug_info
self.spark_job_linked_service = spark_job_linked_service
self.class_name = class_name
self.proxy_user = proxy_user
self.spark_config = spark_config
|
class HDInsightSparkActivity(ExecutionActivity):
'''HDInsight Spark activity.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar linked_service_name: Linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar policy: Activity policy.
:vartype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:ivar root_path: The root path in 'sparkJobLinkedService' for all the job’s files. Type: string
(or Expression with resultType string). Required.
:vartype root_path: JSON
:ivar entry_file_path: The relative path to the root folder of the code/package to be executed.
Type: string (or Expression with resultType string). Required.
:vartype entry_file_path: JSON
:ivar arguments: The user-specified arguments to HDInsightSparkActivity.
:vartype arguments: list[JSON]
:ivar get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:vartype get_debug_info: str or ~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:ivar spark_job_linked_service: The storage linked service for uploading the entry file and
dependencies, and for receiving logs.
:vartype spark_job_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar class_name: The application's Java/Spark main class.
:vartype class_name: str
:ivar proxy_user: The user to impersonate that will execute the job. Type: string (or
Expression with resultType string).
:vartype proxy_user: JSON
:ivar spark_config: Spark configuration property.
:vartype spark_config: dict[str, JSON]
'''
def __init__(
self,
*,
name: str,
root_path: JSON,
entry_file_path: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
policy: Optional["_models.ActivityPolicy"] = None,
arguments: Optional[List[JSON]] = None,
get_debug_info: Optional[Union[str, "_models.HDInsightActivityDebugInfoOption"]] = None,
spark_job_linked_service: Optional["_models.LinkedServiceReference"] = None,
class_name: Optional[str] = None,
proxy_user: Optional[JSON] = None,
spark_config: Optional[Dict[str, JSON]] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword linked_service_name: Linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword policy: Activity policy.
:paramtype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:keyword root_path: The root path in 'sparkJobLinkedService' for all the job’s files. Type:
string (or Expression with resultType string). Required.
:paramtype root_path: JSON
:keyword entry_file_path: The relative path to the root folder of the code/package to be
executed. Type: string (or Expression with resultType string). Required.
:paramtype entry_file_path: JSON
:keyword arguments: The user-specified arguments to HDInsightSparkActivity.
:paramtype arguments: list[JSON]
:keyword get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:paramtype get_debug_info: str or
~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:keyword spark_job_linked_service: The storage linked service for uploading the entry file and
dependencies, and for receiving logs.
:paramtype spark_job_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword class_name: The application's Java/Spark main class.
:paramtype class_name: str
:keyword proxy_user: The user to impersonate that will execute the job. Type: string (or
Expression with resultType string).
:paramtype proxy_user: JSON
:keyword spark_config: Spark configuration property.
:paramtype spark_config: dict[str, JSON]
'''
pass
| 2 | 2 | 88 | 0 | 43 | 45 | 1 | 1.31 | 1 | 3 | 0 | 0 | 1 | 9 | 1 | 18 | 167 | 5 | 70 | 34 | 47 | 92 | 14 | 13 | 12 | 1 | 4 | 0 | 1 |
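Editorial note: a minimal sketch instantiating the HDInsightSparkActivity model above; values are hypothetical. Per the signature in this record, name, root_path, and entry_file_path are required. As before, the LinkedServiceReference arguments are an assumption, since that class is not defined in this record.

# Sketch only: constructing an HDInsightSparkActivity; values are hypothetical.
from azure.mgmt.datafactory.models import HDInsightSparkActivity, LinkedServiceReference

spark_activity = HDInsightSparkActivity(
    name="RunSparkJob",                 # required
    root_path="adfspark",               # required: root folder in 'sparkJobLinkedService'
    entry_file_path="pyFiles/main.py",  # required: relative to root_path
    arguments=["--date", "2024-01-01"],
    spark_job_linked_service=LinkedServiceReference(
        type="LinkedServiceReference", reference_name="StorageLinkedService"  # assumed SDK signature
    ),
    get_debug_info="Failure",
)
# spark_activity.type is set to "HDInsightSpark" by the constructor.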
10,888 | Azure/azure-cli-extensions | src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py | azext_datafactory.vendored_sdks.datafactory.models._models_py3.HDInsightStreamingActivity |
class HDInsightStreamingActivity(ExecutionActivity):
"""HDInsight streaming activity type.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar linked_service_name: Linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar policy: Activity policy.
:vartype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:ivar storage_linked_services: Storage linked service references.
:vartype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:ivar arguments: User specified arguments to HDInsightActivity.
:vartype arguments: list[JSON]
:ivar get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:vartype get_debug_info: str or ~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:ivar mapper: Mapper executable name. Type: string (or Expression with resultType string).
Required.
:vartype mapper: JSON
:ivar reducer: Reducer executable name. Type: string (or Expression with resultType string).
Required.
:vartype reducer: JSON
:ivar input: Input blob path. Type: string (or Expression with resultType string). Required.
:vartype input: JSON
:ivar output: Output blob path. Type: string (or Expression with resultType string). Required.
:vartype output: JSON
:ivar file_paths: Paths to streaming job files. Can be directories. Required.
:vartype file_paths: list[JSON]
:ivar file_linked_service: Linked service reference where the files are located.
:vartype file_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar combiner: Combiner executable name. Type: string (or Expression with resultType string).
:vartype combiner: JSON
:ivar command_environment: Command line environment values.
:vartype command_environment: list[JSON]
:ivar defines: Allows user to specify defines for streaming job request.
:vartype defines: dict[str, JSON]
"""
_validation = {
"name": {"required": True},
"type": {"required": True},
"mapper": {"required": True},
"reducer": {"required": True},
"input": {"required": True},
"output": {"required": True},
"file_paths": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"state": {"key": "state", "type": "str"},
"on_inactive_mark_as": {"key": "onInactiveMarkAs", "type": "str"},
"depends_on": {"key": "dependsOn", "type": "[ActivityDependency]"},
"user_properties": {"key": "userProperties", "type": "[UserProperty]"},
"linked_service_name": {"key": "linkedServiceName", "type": "LinkedServiceReference"},
"policy": {"key": "policy", "type": "ActivityPolicy"},
"storage_linked_services": {"key": "typeProperties.storageLinkedServices", "type": "[LinkedServiceReference]"},
"arguments": {"key": "typeProperties.arguments", "type": "[object]"},
"get_debug_info": {"key": "typeProperties.getDebugInfo", "type": "str"},
"mapper": {"key": "typeProperties.mapper", "type": "object"},
"reducer": {"key": "typeProperties.reducer", "type": "object"},
"input": {"key": "typeProperties.input", "type": "object"},
"output": {"key": "typeProperties.output", "type": "object"},
"file_paths": {"key": "typeProperties.filePaths", "type": "[object]"},
"file_linked_service": {"key": "typeProperties.fileLinkedService", "type": "LinkedServiceReference"},
"combiner": {"key": "typeProperties.combiner", "type": "object"},
"command_environment": {"key": "typeProperties.commandEnvironment", "type": "[object]"},
"defines": {"key": "typeProperties.defines", "type": "{object}"},
}
def __init__(
self,
*,
name: str,
mapper: JSON,
reducer: JSON,
input: JSON,
output: JSON,
file_paths: List[JSON],
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
policy: Optional["_models.ActivityPolicy"] = None,
storage_linked_services: Optional[List["_models.LinkedServiceReference"]] = None,
arguments: Optional[List[JSON]] = None,
get_debug_info: Optional[Union[str, "_models.HDInsightActivityDebugInfoOption"]] = None,
file_linked_service: Optional["_models.LinkedServiceReference"] = None,
combiner: Optional[JSON] = None,
command_environment: Optional[List[JSON]] = None,
defines: Optional[Dict[str, JSON]] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword linked_service_name: Linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword policy: Activity policy.
:paramtype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:keyword storage_linked_services: Storage linked service references.
:paramtype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:keyword arguments: User specified arguments to HDInsightActivity.
:paramtype arguments: list[JSON]
:keyword get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:paramtype get_debug_info: str or
~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:keyword mapper: Mapper executable name. Type: string (or Expression with resultType string).
Required.
:paramtype mapper: JSON
:keyword reducer: Reducer executable name. Type: string (or Expression with resultType string).
Required.
:paramtype reducer: JSON
:keyword input: Input blob path. Type: string (or Expression with resultType string). Required.
:paramtype input: JSON
:keyword output: Output blob path. Type: string (or Expression with resultType string).
Required.
:paramtype output: JSON
:keyword file_paths: Paths to streaming job files. Can be directories. Required.
:paramtype file_paths: list[JSON]
:keyword file_linked_service: Linked service reference where the files are located.
:paramtype file_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword combiner: Combiner executable name. Type: string (or Expression with resultType
string).
:paramtype combiner: JSON
:keyword command_environment: Command line environment values.
:paramtype command_environment: list[JSON]
:keyword defines: Allows user to specify defines for streaming job request.
:paramtype defines: dict[str, JSON]
"""
super().__init__(
additional_properties=additional_properties,
name=name,
description=description,
state=state,
on_inactive_mark_as=on_inactive_mark_as,
depends_on=depends_on,
user_properties=user_properties,
linked_service_name=linked_service_name,
policy=policy,
**kwargs
)
self.type: str = "HDInsightStreaming"
self.storage_linked_services = storage_linked_services
self.arguments = arguments
self.get_debug_info = get_debug_info
self.mapper = mapper
self.reducer = reducer
self.input = input
self.output = output
self.file_paths = file_paths
self.file_linked_service = file_linked_service
self.combiner = combiner
self.command_environment = command_environment
self.defines = defines
|
class HDInsightStreamingActivity(ExecutionActivity):
'''HDInsight streaming activity type.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar name: Activity name. Required.
:vartype name: str
:ivar type: Type of activity. Required.
:vartype type: str
:ivar description: Activity description.
:vartype description: str
:ivar state: Activity state. This is an optional property and if not provided, the state will
be Active by default. Known values are: "Active" and "Inactive".
:vartype state: str or ~azure.mgmt.datafactory.models.ActivityState
:ivar on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:vartype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:ivar depends_on: Activity depends on condition.
:vartype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:ivar user_properties: Activity user properties.
:vartype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:ivar linked_service_name: Linked service reference.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar policy: Activity policy.
:vartype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:ivar storage_linked_services: Storage linked service references.
:vartype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:ivar arguments: User specified arguments to HDInsightActivity.
:vartype arguments: list[JSON]
:ivar get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:vartype get_debug_info: str or ~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:ivar mapper: Mapper executable name. Type: string (or Expression with resultType string).
Required.
:vartype mapper: JSON
:ivar reducer: Reducer executable name. Type: string (or Expression with resultType string).
Required.
:vartype reducer: JSON
:ivar input: Input blob path. Type: string (or Expression with resultType string). Required.
:vartype input: JSON
:ivar output: Output blob path. Type: string (or Expression with resultType string). Required.
:vartype output: JSON
:ivar file_paths: Paths to streaming job files. Can be directories. Required.
:vartype file_paths: list[JSON]
:ivar file_linked_service: Linked service reference where the files are located.
:vartype file_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar combiner: Combiner executable name. Type: string (or Expression with resultType string).
:vartype combiner: JSON
:ivar command_environment: Command line environment values.
:vartype command_environment: list[JSON]
:ivar defines: Allows user to specify defines for streaming job request.
:vartype defines: dict[str, JSON]
'''
def __init__(
self,
*,
name: str,
mapper: JSON,
reducer: JSON,
input: JSON,
output: JSON,
file_paths: List[JSON],
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
state: Optional[Union[str, "_models.ActivityState"]] = None,
on_inactive_mark_as: Optional[Union[str, "_models.ActivityOnInactiveMarkAs"]] = None,
depends_on: Optional[List["_models.ActivityDependency"]] = None,
user_properties: Optional[List["_models.UserProperty"]] = None,
linked_service_name: Optional["_models.LinkedServiceReference"] = None,
policy: Optional["_models.ActivityPolicy"] = None,
storage_linked_services: Optional[List["_models.LinkedServiceReference"]] = None,
arguments: Optional[List[JSON]] = None,
get_debug_info: Optional[Union[str, "_models.HDInsightActivityDebugInfoOption"]] = None,
file_linked_service: Optional["_models.LinkedServiceReference"] = None,
combiner: Optional[JSON] = None,
command_environment: Optional[List[JSON]] = None,
defines: Optional[Dict[str, JSON]] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword name: Activity name. Required.
:paramtype name: str
:keyword description: Activity description.
:paramtype description: str
:keyword state: Activity state. This is an optional property and if not provided, the state
will be Active by default. Known values are: "Active" and "Inactive".
:paramtype state: str or ~azure.mgmt.datafactory.models.ActivityState
:keyword on_inactive_mark_as: Status result of the activity when the state is set to Inactive.
This is an optional property and if not provided when the activity is inactive, the status will
be Succeeded by default. Known values are: "Succeeded", "Failed", and "Skipped".
:paramtype on_inactive_mark_as: str or ~azure.mgmt.datafactory.models.ActivityOnInactiveMarkAs
:keyword depends_on: Activity depends on condition.
:paramtype depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
:keyword user_properties: Activity user properties.
:paramtype user_properties: list[~azure.mgmt.datafactory.models.UserProperty]
:keyword linked_service_name: Linked service reference.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword policy: Activity policy.
:paramtype policy: ~azure.mgmt.datafactory.models.ActivityPolicy
:keyword storage_linked_services: Storage linked service references.
:paramtype storage_linked_services: list[~azure.mgmt.datafactory.models.LinkedServiceReference]
:keyword arguments: User specified arguments to HDInsightActivity.
:paramtype arguments: list[JSON]
:keyword get_debug_info: Debug info option. Known values are: "None", "Always", and "Failure".
:paramtype get_debug_info: str or
~azure.mgmt.datafactory.models.HDInsightActivityDebugInfoOption
:keyword mapper: Mapper executable name. Type: string (or Expression with resultType string).
Required.
:paramtype mapper: JSON
:keyword reducer: Reducer executable name. Type: string (or Expression with resultType string).
Required.
:paramtype reducer: JSON
:keyword input: Input blob path. Type: string (or Expression with resultType string). Required.
:paramtype input: JSON
:keyword output: Output blob path. Type: string (or Expression with resultType string).
Required.
:paramtype output: JSON
:keyword file_paths: Paths to streaming job files. Can be directories. Required.
:paramtype file_paths: list[JSON]
:keyword file_linked_service: Linked service reference where the files are located.
:paramtype file_linked_service: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword combiner: Combiner executable name. Type: string (or Expression with resultType
string).
:paramtype combiner: JSON
:keyword command_environment: Command line environment values.
:paramtype command_environment: list[JSON]
:keyword defines: Allows user to specify defines for streaming job request.
:paramtype defines: dict[str, JSON]
'''
pass
| 2 | 2 | 104 | 0 | 51 | 53 | 1 | 1.25 | 1 | 3 | 0 | 0 | 1 | 13 | 1 | 18 | 196 | 5 | 85 | 42 | 58 | 106 | 18 | 17 | 16 | 1 | 4 | 0 | 1 |
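Editorial note: a minimal sketch for the HDInsightStreamingActivity model above. Per the signature in this record, name, mapper, reducer, input, output, and file_paths are all required; the executables and blob paths below are hypothetical.

# Sketch only: constructing an HDInsightStreamingActivity; paths are hypothetical.
from azure.mgmt.datafactory.models import HDInsightStreamingActivity

streaming_activity = HDInsightStreamingActivity(
    name="RunStreamingJob",                          # required
    mapper="cat.exe",                                # required: mapper executable
    reducer="wc.exe",                                # required: reducer executable
    input="wasb:///example/data/davinci.txt",        # required: input blob path
    output="wasb:///example/results/wordcount.txt",  # required: output blob path
    file_paths=[                                     # required: streaming job files
        "adfsample/example/apps/wc.exe",
        "adfsample/example/apps/cat.exe",
    ],
    get_debug_info="Failure",
)
# streaming_activity.type is set to "HDInsightStreaming" by the constructor.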
10,889 | Azure/azure-cli-extensions | src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py | azext_datafactory.vendored_sdks.datafactory.models._models_py3.HdfsLinkedService |
class HdfsLinkedService(LinkedService):
"""Hadoop Distributed File System (HDFS) linked service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar url: The URL of the HDFS service endpoint, e.g. http://myhostname:50070/webhdfs/v1 .
Type: string (or Expression with resultType string). Required.
:vartype url: JSON
:ivar authentication_type: Type of authentication used to connect to the HDFS. Possible values
are: Anonymous and Windows. Type: string (or Expression with resultType string).
:vartype authentication_type: JSON
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
:ivar user_name: User name for Windows authentication. Type: string (or Expression with
resultType string).
:vartype user_name: JSON
:ivar password: Password for Windows authentication.
:vartype password: ~azure.mgmt.datafactory.models.SecretBase
"""
_validation = {
"type": {"required": True},
"url": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"version": {"key": "version", "type": "str"},
"connect_via": {"key": "connectVia", "type": "IntegrationRuntimeReference"},
"description": {"key": "description", "type": "str"},
"parameters": {"key": "parameters", "type": "{ParameterSpecification}"},
"annotations": {"key": "annotations", "type": "[object]"},
"url": {"key": "typeProperties.url", "type": "object"},
"authentication_type": {"key": "typeProperties.authenticationType", "type": "object"},
"encrypted_credential": {"key": "typeProperties.encryptedCredential", "type": "str"},
"user_name": {"key": "typeProperties.userName", "type": "object"},
"password": {"key": "typeProperties.password", "type": "SecretBase"},
}
def __init__(
self,
*,
url: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
authentication_type: Optional[JSON] = None,
encrypted_credential: Optional[str] = None,
user_name: Optional[JSON] = None,
password: Optional["_models.SecretBase"] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword url: The URL of the HDFS service endpoint, e.g. http://myhostname:50070/webhdfs/v1 .
Type: string (or Expression with resultType string). Required.
:paramtype url: JSON
:keyword authentication_type: Type of authentication used to connect to the HDFS. Possible
values are: Anonymous and Windows. Type: string (or Expression with resultType string).
:paramtype authentication_type: JSON
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
:keyword user_name: User name for Windows authentication. Type: string (or Expression with
resultType string).
:paramtype user_name: JSON
:keyword password: Password for Windows authentication.
:paramtype password: ~azure.mgmt.datafactory.models.SecretBase
"""
super().__init__(
additional_properties=additional_properties,
version=version,
connect_via=connect_via,
description=description,
parameters=parameters,
annotations=annotations,
**kwargs
)
self.type: str = "Hdfs"
self.url = url
self.authentication_type = authentication_type
self.encrypted_credential = encrypted_credential
self.user_name = user_name
self.password = password
|
class HdfsLinkedService(LinkedService):
'''Hadoop Distributed File System (HDFS) linked service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar url: The URL of the HDFS service endpoint, e.g. http://myhostname:50070/webhdfs/v1 .
Type: string (or Expression with resultType string). Required.
:vartype url: JSON
:ivar authentication_type: Type of authentication used to connect to the HDFS. Possible values
are: Anonymous and Windows. Type: string (or Expression with resultType string).
:vartype authentication_type: JSON
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
:ivar user_name: User name for Windows authentication. Type: string (or Expression with
resultType string).
:vartype user_name: JSON
:ivar password: Password for Windows authentication.
:vartype password: ~azure.mgmt.datafactory.models.SecretBase
'''
def __init__(
self,
*,
url: JSON,
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
authentication_type: Optional[JSON] = None,
encrypted_credential: Optional[str] = None,
user_name: Optional[JSON] = None,
password: Optional["_models.SecretBase"] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword url: The URL of the HDFS service endpoint, e.g. http://myhostname:50070/webhdfs/v1 .
Type: string (or Expression with resultType string). Required.
:paramtype url: JSON
:keyword authentication_type: Type of authentication used to connect to the HDFS. Possible
values are: Anonymous and Windows. Type: string (or Expression with resultType string).
:paramtype authentication_type: JSON
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
:keyword user_name: User name for Windows authentication. Type: string (or Expression with
resultType string).
:paramtype user_name: JSON
:keyword password: Password for Windows authentication.
:paramtype password: ~azure.mgmt.datafactory.models.SecretBase
'''
pass
| 2 | 2 | 60 | 0 | 31 | 29 | 1 | 1.22 | 1 | 3 | 0 | 0 | 1 | 6 | 1 | 17 | 116 | 5 | 50 | 25 | 33 | 61 | 11 | 10 | 9 | 1 | 3 | 0 | 1 |
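Editorial note: a minimal sketch for the HdfsLinkedService model above; url is the only required constructor argument per the signature in this record. SecureString is assumed to be the SDK's plain-value SecretBase implementation (it is not defined in this record), and the host and credentials are placeholders.

# Sketch only: constructing an HdfsLinkedService with Windows authentication.
from azure.mgmt.datafactory.models import HdfsLinkedService, SecureString

hdfs_linked_service = HdfsLinkedService(
    url="http://myhostname:50070/webhdfs/v1",      # required: WebHDFS endpoint
    authentication_type="Windows",                 # "Anonymous" or "Windows"
    user_name="MYDOMAIN\\svc_adf",                 # hypothetical service account
    password=SecureString(value="<placeholder>"),  # assumed SecretBase subtype
)
# hdfs_linked_service.type is set to "Hdfs" by the constructor.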
10,890 | Azure/azure-cli-extensions | src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py | azext_datafactory.vendored_sdks.datafactory.models._models_py3.HdfsLocation |
class HdfsLocation(DatasetLocation):
"""The location of HDFS.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of dataset storage location. Required.
:vartype type: str
:ivar folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:vartype folder_path: JSON
:ivar file_name: Specify the file name of dataset. Type: string (or Expression with resultType
string).
:vartype file_name: JSON
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"folder_path": {"key": "folderPath", "type": "object"},
"file_name": {"key": "fileName", "type": "object"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
folder_path: Optional[JSON] = None,
file_name: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:paramtype folder_path: JSON
:keyword file_name: Specify the file name of dataset. Type: string (or Expression with
resultType string).
:paramtype file_name: JSON
"""
super().__init__(
additional_properties=additional_properties, folder_path=folder_path, file_name=file_name, **kwargs
)
self.type: str = "HdfsLocation"
|
class HdfsLocation(DatasetLocation):
'''The location of HDFS.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of dataset storage location. Required.
:vartype type: str
:ivar folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:vartype folder_path: JSON
:ivar file_name: Specify the file name of dataset. Type: string (or Expression with resultType
string).
:vartype file_name: JSON
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
folder_path: Optional[JSON] = None,
file_name: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword folder_path: Specify the folder path of dataset. Type: string (or Expression with
resultType string).
:paramtype folder_path: JSON
:keyword file_name: Specify the file name of dataset. Type: string (or Expression with
resultType string).
:paramtype file_name: JSON
'''
pass
| 2 | 2 | 23 | 0 | 12 | 11 | 1 | 1.14 | 1 | 3 | 0 | 0 | 1 | 1 | 1 | 17 | 52 | 5 | 22 | 12 | 13 | 25 | 6 | 5 | 4 | 1 | 3 | 0 | 1 |
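Editorial note: a minimal sketch for the HdfsLocation model above. All constructor arguments are optional per the signature in this record; the folder and file names are hypothetical.

# Sketch only: describing a dataset location inside HDFS.
from azure.mgmt.datafactory.models import HdfsLocation

location = HdfsLocation(
    folder_path="data/incoming",   # folder path within HDFS
    file_name="events_2024.json",  # single file within that folder
)
# location.type is set to "HdfsLocation" by the constructor.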
10,891 | Azure/azure-cli-extensions | src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py | azext_datafactory.vendored_sdks.datafactory.models._models_py3.HdfsReadSettings |
class HdfsReadSettings(StoreReadSettings):
"""HDFS read settings.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The read setting type. Required.
:vartype type: str
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:vartype recursive: JSON
:ivar wildcard_folder_path: HDFS wildcardFolderPath. Type: string (or Expression with
resultType string).
:vartype wildcard_folder_path: JSON
:ivar wildcard_file_name: HDFS wildcardFileName. Type: string (or Expression with resultType
string).
:vartype wildcard_file_name: JSON
:ivar file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:vartype file_list_path: JSON
:ivar enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:vartype enable_partition_discovery: JSON
:ivar partition_root_path: Specify the root path where partition discovery starts from. Type:
string (or Expression with resultType string).
:vartype partition_root_path: JSON
:ivar modified_datetime_start: The start of file's modified datetime. Type: string (or
Expression with resultType string).
:vartype modified_datetime_start: JSON
:ivar modified_datetime_end: The end of file's modified datetime. Type: string (or Expression
with resultType string).
:vartype modified_datetime_end: JSON
:ivar distcp_settings: Specifies Distcp-related settings.
:vartype distcp_settings: ~azure.mgmt.datafactory.models.DistcpSettings
:ivar delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:vartype delete_files_after_completion: JSON
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"max_concurrent_connections": {"key": "maxConcurrentConnections", "type": "object"},
"disable_metrics_collection": {"key": "disableMetricsCollection", "type": "object"},
"recursive": {"key": "recursive", "type": "object"},
"wildcard_folder_path": {"key": "wildcardFolderPath", "type": "object"},
"wildcard_file_name": {"key": "wildcardFileName", "type": "object"},
"file_list_path": {"key": "fileListPath", "type": "object"},
"enable_partition_discovery": {"key": "enablePartitionDiscovery", "type": "object"},
"partition_root_path": {"key": "partitionRootPath", "type": "object"},
"modified_datetime_start": {"key": "modifiedDatetimeStart", "type": "object"},
"modified_datetime_end": {"key": "modifiedDatetimeEnd", "type": "object"},
"distcp_settings": {"key": "distcpSettings", "type": "DistcpSettings"},
"delete_files_after_completion": {"key": "deleteFilesAfterCompletion", "type": "object"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
recursive: Optional[JSON] = None,
wildcard_folder_path: Optional[JSON] = None,
wildcard_file_name: Optional[JSON] = None,
file_list_path: Optional[JSON] = None,
enable_partition_discovery: Optional[JSON] = None,
partition_root_path: Optional[JSON] = None,
modified_datetime_start: Optional[JSON] = None,
modified_datetime_end: Optional[JSON] = None,
distcp_settings: Optional["_models.DistcpSettings"] = None,
delete_files_after_completion: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:paramtype recursive: JSON
:keyword wildcard_folder_path: HDFS wildcardFolderPath. Type: string (or Expression with
resultType string).
:paramtype wildcard_folder_path: JSON
:keyword wildcard_file_name: HDFS wildcardFileName. Type: string (or Expression with resultType
string).
:paramtype wildcard_file_name: JSON
:keyword file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:paramtype file_list_path: JSON
:keyword enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:paramtype enable_partition_discovery: JSON
:keyword partition_root_path: Specify the root path where partition discovery starts from.
Type: string (or Expression with resultType string).
:paramtype partition_root_path: JSON
:keyword modified_datetime_start: The start of file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_start: JSON
:keyword modified_datetime_end: The end of file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_end: JSON
:keyword distcp_settings: Specifies Distcp-related settings.
:paramtype distcp_settings: ~azure.mgmt.datafactory.models.DistcpSettings
:keyword delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:paramtype delete_files_after_completion: JSON
"""
super().__init__(
additional_properties=additional_properties,
max_concurrent_connections=max_concurrent_connections,
disable_metrics_collection=disable_metrics_collection,
**kwargs
)
self.type: str = "HdfsReadSettings"
self.recursive = recursive
self.wildcard_folder_path = wildcard_folder_path
self.wildcard_file_name = wildcard_file_name
self.file_list_path = file_list_path
self.enable_partition_discovery = enable_partition_discovery
self.partition_root_path = partition_root_path
self.modified_datetime_start = modified_datetime_start
self.modified_datetime_end = modified_datetime_end
self.distcp_settings = distcp_settings
self.delete_files_after_completion = delete_files_after_completion
|
class HdfsReadSettings(StoreReadSettings):
'''HDFS read settings.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: The read setting type. Required.
:vartype type: str
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:vartype recursive: JSON
:ivar wildcard_folder_path: HDFS wildcardFolderPath. Type: string (or Expression with
resultType string).
:vartype wildcard_folder_path: JSON
:ivar wildcard_file_name: HDFS wildcardFileName. Type: string (or Expression with resultType
string).
:vartype wildcard_file_name: JSON
:ivar file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:vartype file_list_path: JSON
:ivar enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:vartype enable_partition_discovery: JSON
:ivar partition_root_path: Specify the root path where partition discovery starts from. Type:
string (or Expression with resultType string).
:vartype partition_root_path: JSON
:ivar modified_datetime_start: The start of the file's modified datetime. Type: string (or
Expression with resultType string).
:vartype modified_datetime_start: JSON
:ivar modified_datetime_end: The end of the file's modified datetime. Type: string (or
Expression with resultType string).
:vartype modified_datetime_end: JSON
:ivar distcp_settings: Specifies Distcp-related settings.
:vartype distcp_settings: ~azure.mgmt.datafactory.models.DistcpSettings
:ivar delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:vartype delete_files_after_completion: JSON
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
recursive: Optional[JSON] = None,
wildcard_folder_path: Optional[JSON] = None,
wildcard_file_name: Optional[JSON] = None,
file_list_path: Optional[JSON] = None,
enable_partition_discovery: Optional[JSON] = None,
partition_root_path: Optional[JSON] = None,
modified_datetime_start: Optional[JSON] = None,
modified_datetime_end: Optional[JSON] = None,
distcp_settings: Optional["_models.DistcpSettings"] = None,
delete_files_after_completion: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:paramtype recursive: JSON
:keyword wildcard_folder_path: HDFS wildcardFolderPath. Type: string (or Expression with
resultType string).
:paramtype wildcard_folder_path: JSON
:keyword wildcard_file_name: HDFS wildcardFileName. Type: string (or Expression with resultType
string).
:paramtype wildcard_file_name: JSON
:keyword file_list_path: Point to a text file that lists each file (relative path to the path
configured in the dataset) that you want to copy. Type: string (or Expression with resultType
string).
:paramtype file_list_path: JSON
:keyword enable_partition_discovery: Indicates whether to enable partition discovery. Type:
boolean (or Expression with resultType boolean).
:paramtype enable_partition_discovery: JSON
:keyword partition_root_path: Specify the root path where partition discovery starts from.
Type: string (or Expression with resultType string).
:paramtype partition_root_path: JSON
:keyword modified_datetime_start: The start of the file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_start: JSON
:keyword modified_datetime_end: The end of the file's modified datetime. Type: string (or
Expression with resultType string).
:paramtype modified_datetime_end: JSON
:keyword distcp_settings: Specifies Distcp-related settings.
:paramtype distcp_settings: ~azure.mgmt.datafactory.models.DistcpSettings
:keyword delete_files_after_completion: Indicates whether the source files need to be deleted
after copy completion. Default is false. Type: boolean (or Expression with resultType boolean).
:paramtype delete_files_after_completion: JSON
'''
pass
| 2 | 2 | 76 | 0 | 35 | 41 | 1 | 1.55 | 1 | 3 | 0 | 0 | 1 | 11 | 1 | 17 | 145 | 5 | 55 | 32 | 36 | 85 | 16 | 15 | 14 | 1 | 3 | 0 | 1 |
10,892 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.HdfsSource
|
class HdfsSource(CopySource):
"""A copy activity HDFS source.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Copy source type. Required.
:vartype type: str
:ivar source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:vartype source_retry_count: JSON
:ivar source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype source_retry_wait: JSON
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:vartype recursive: JSON
:ivar distcp_settings: Specifies Distcp-related settings.
:vartype distcp_settings: ~azure.mgmt.datafactory.models.DistcpSettings
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"source_retry_count": {"key": "sourceRetryCount", "type": "object"},
"source_retry_wait": {"key": "sourceRetryWait", "type": "object"},
"max_concurrent_connections": {"key": "maxConcurrentConnections", "type": "object"},
"disable_metrics_collection": {"key": "disableMetricsCollection", "type": "object"},
"recursive": {"key": "recursive", "type": "object"},
"distcp_settings": {"key": "distcpSettings", "type": "DistcpSettings"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
source_retry_count: Optional[JSON] = None,
source_retry_wait: Optional[JSON] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
recursive: Optional[JSON] = None,
distcp_settings: Optional["_models.DistcpSettings"] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:paramtype source_retry_count: JSON
:keyword source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype source_retry_wait: JSON
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:paramtype recursive: JSON
:keyword distcp_settings: Specifies Distcp-related settings.
:paramtype distcp_settings: ~azure.mgmt.datafactory.models.DistcpSettings
"""
super().__init__(
additional_properties=additional_properties,
source_retry_count=source_retry_count,
source_retry_wait=source_retry_wait,
max_concurrent_connections=max_concurrent_connections,
disable_metrics_collection=disable_metrics_collection,
**kwargs
)
self.type: str = "HdfsSource"
self.recursive = recursive
self.distcp_settings = distcp_settings
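
# --- Usage sketch (editor's addition). DistcpSettings is another model in
# this module; the resource manager endpoint and script path below are
# illustrative placeholders, not values taken from the source.
def _example_hdfs_source() -> "HdfsSource":
    distcp = DistcpSettings(
        resource_manager_endpoint="yarn-rm.example.com:8088",
        temp_script_path="/tmp/distcp-scripts",
    )
    return HdfsSource(recursive=True, distcp_settings=distcp)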
|
class HdfsSource(CopySource):
'''A copy activity HDFS source.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Copy source type. Required.
:vartype type: str
:ivar source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:vartype source_retry_count: JSON
:ivar source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype source_retry_wait: JSON
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:vartype recursive: JSON
:ivar distcp_settings: Specifies Distcp-related settings.
:vartype distcp_settings: ~azure.mgmt.datafactory.models.DistcpSettings
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
source_retry_count: Optional[JSON] = None,
source_retry_wait: Optional[JSON] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
recursive: Optional[JSON] = None,
distcp_settings: Optional["_models.DistcpSettings"] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:paramtype source_retry_count: JSON
:keyword source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype source_retry_wait: JSON
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword recursive: If true, files under the folder path will be read recursively. Default is
true. Type: boolean (or Expression with resultType boolean).
:paramtype recursive: JSON
:keyword distcp_settings: Specifies Distcp-related settings.
:paramtype distcp_settings: ~azure.mgmt.datafactory.models.DistcpSettings
'''
pass
| 2 | 2 | 45 | 0 | 23 | 22 | 1 | 1.27 | 1 | 3 | 0 | 0 | 1 | 3 | 1 | 17 | 89 | 5 | 37 | 18 | 24 | 47 | 8 | 7 | 6 | 1 | 3 | 0 | 1 |
10,893 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.HiveLinkedService
|
class HiveLinkedService(LinkedService):
"""Hive Server linked service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar host: IP address or host name of the Hive server, separated by ';' for multiple hosts
(only when serviceDiscoveryMode is enabled). Required.
:vartype host: JSON
:ivar port: The TCP port that the Hive server uses to listen for client connections.
:vartype port: JSON
:ivar server_type: The type of Hive server. Known values are: "HiveServer1", "HiveServer2", and
"HiveThriftServer".
:vartype server_type: str or ~azure.mgmt.datafactory.models.HiveServerType
:ivar thrift_transport_protocol: The transport protocol to use in the Thrift layer. Known
values are: "Binary", "SASL", and "HTTP ".
:vartype thrift_transport_protocol: str or
~azure.mgmt.datafactory.models.HiveThriftTransportProtocol
:ivar authentication_type: The authentication method used to access the Hive server. Required.
Known values are: "Anonymous", "Username", "UsernameAndPassword", and
"WindowsAzureHDInsightService".
:vartype authentication_type: str or ~azure.mgmt.datafactory.models.HiveAuthenticationType
:ivar service_discovery_mode: true to indicate using the ZooKeeper service, false otherwise.
:vartype service_discovery_mode: JSON
:ivar zoo_keeper_name_space: The namespace on ZooKeeper under which Hive Server 2 nodes are
added.
:vartype zoo_keeper_name_space: JSON
:ivar use_native_query: Specifies whether the driver uses native HiveQL queries, or converts
them into an equivalent form in HiveQL.
:vartype use_native_query: JSON
:ivar username: The user name that you use to access Hive Server.
:vartype username: JSON
:ivar password: The password corresponding to the user name that you provided in the Username
field.
:vartype password: ~azure.mgmt.datafactory.models.SecretBase
:ivar http_path: The partial URL corresponding to the Hive server.
:vartype http_path: JSON
:ivar enable_ssl: Specifies whether the connections to the server are encrypted using SSL. The
default value is false.
:vartype enable_ssl: JSON
:ivar trusted_cert_path: The full path of the .pem file containing trusted CA certificates for
verifying the server when connecting over SSL. This property can only be set when using SSL on
self-hosted IR. The default value is the cacerts.pem file installed with the IR.
:vartype trusted_cert_path: JSON
:ivar use_system_trust_store: Specifies whether to use a CA certificate from the system trust
store or from a specified PEM file. The default value is false.
:vartype use_system_trust_store: JSON
:ivar allow_host_name_cn_mismatch: Specifies whether to require a CA-issued SSL certificate
name to match the host name of the server when connecting over SSL. The default value is false.
:vartype allow_host_name_cn_mismatch: JSON
:ivar allow_self_signed_server_cert: Specifies whether to allow self-signed certificates from
the server. The default value is false.
:vartype allow_self_signed_server_cert: JSON
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
"""
_validation = {
"type": {"required": True},
"host": {"required": True},
"authentication_type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"version": {"key": "version", "type": "str"},
"connect_via": {"key": "connectVia", "type": "IntegrationRuntimeReference"},
"description": {"key": "description", "type": "str"},
"parameters": {"key": "parameters", "type": "{ParameterSpecification}"},
"annotations": {"key": "annotations", "type": "[object]"},
"host": {"key": "typeProperties.host", "type": "object"},
"port": {"key": "typeProperties.port", "type": "object"},
"server_type": {"key": "typeProperties.serverType", "type": "str"},
"thrift_transport_protocol": {"key": "typeProperties.thriftTransportProtocol", "type": "str"},
"authentication_type": {"key": "typeProperties.authenticationType", "type": "str"},
"service_discovery_mode": {"key": "typeProperties.serviceDiscoveryMode", "type": "object"},
"zoo_keeper_name_space": {"key": "typeProperties.zooKeeperNameSpace", "type": "object"},
"use_native_query": {"key": "typeProperties.useNativeQuery", "type": "object"},
"username": {"key": "typeProperties.username", "type": "object"},
"password": {"key": "typeProperties.password", "type": "SecretBase"},
"http_path": {"key": "typeProperties.httpPath", "type": "object"},
"enable_ssl": {"key": "typeProperties.enableSsl", "type": "object"},
"trusted_cert_path": {"key": "typeProperties.trustedCertPath", "type": "object"},
"use_system_trust_store": {"key": "typeProperties.useSystemTrustStore", "type": "object"},
"allow_host_name_cn_mismatch": {"key": "typeProperties.allowHostNameCNMismatch", "type": "object"},
"allow_self_signed_server_cert": {"key": "typeProperties.allowSelfSignedServerCert", "type": "object"},
"encrypted_credential": {"key": "typeProperties.encryptedCredential", "type": "str"},
}
def __init__( # pylint: disable=too-many-locals
self,
*,
host: JSON,
authentication_type: Union[str, "_models.HiveAuthenticationType"],
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
port: Optional[JSON] = None,
server_type: Optional[Union[str, "_models.HiveServerType"]] = None,
thrift_transport_protocol: Optional[Union[str, "_models.HiveThriftTransportProtocol"]] = None,
service_discovery_mode: Optional[JSON] = None,
zoo_keeper_name_space: Optional[JSON] = None,
use_native_query: Optional[JSON] = None,
username: Optional[JSON] = None,
password: Optional["_models.SecretBase"] = None,
http_path: Optional[JSON] = None,
enable_ssl: Optional[JSON] = None,
trusted_cert_path: Optional[JSON] = None,
use_system_trust_store: Optional[JSON] = None,
allow_host_name_cn_mismatch: Optional[JSON] = None,
allow_self_signed_server_cert: Optional[JSON] = None,
encrypted_credential: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword host: IP address or host name of the Hive server, separated by ';' for multiple hosts
(only when serviceDiscoveryMode is enabled). Required.
:paramtype host: JSON
:keyword port: The TCP port that the Hive server uses to listen for client connections.
:paramtype port: JSON
:keyword server_type: The type of Hive server. Known values are: "HiveServer1", "HiveServer2",
and "HiveThriftServer".
:paramtype server_type: str or ~azure.mgmt.datafactory.models.HiveServerType
:keyword thrift_transport_protocol: The transport protocol to use in the Thrift layer. Known
values are: "Binary", "SASL", and "HTTP ".
:paramtype thrift_transport_protocol: str or
~azure.mgmt.datafactory.models.HiveThriftTransportProtocol
:keyword authentication_type: The authentication method used to access the Hive server.
Required. Known values are: "Anonymous", "Username", "UsernameAndPassword", and
"WindowsAzureHDInsightService".
:paramtype authentication_type: str or ~azure.mgmt.datafactory.models.HiveAuthenticationType
:keyword service_discovery_mode: true to indicate using the ZooKeeper service, false otherwise.
:paramtype service_discovery_mode: JSON
:keyword zoo_keeper_name_space: The namespace on ZooKeeper under which Hive Server 2 nodes are
added.
:paramtype zoo_keeper_name_space: JSON
:keyword use_native_query: Specifies whether the driver uses native HiveQL queries, or converts
them into an equivalent form in HiveQL.
:paramtype use_native_query: JSON
:keyword username: The user name that you use to access Hive Server.
:paramtype username: JSON
:keyword password: The password corresponding to the user name that you provided in the
Username field.
:paramtype password: ~azure.mgmt.datafactory.models.SecretBase
:keyword http_path: The partial URL corresponding to the Hive server.
:paramtype http_path: JSON
:keyword enable_ssl: Specifies whether the connections to the server are encrypted using SSL.
The default value is false.
:paramtype enable_ssl: JSON
:keyword trusted_cert_path: The full path of the .pem file containing trusted CA certificates
for verifying the server when connecting over SSL. This property can only be set when using SSL
on self-hosted IR. The default value is the cacerts.pem file installed with the IR.
:paramtype trusted_cert_path: JSON
:keyword use_system_trust_store: Specifies whether to use a CA certificate from the system
trust store or from a specified PEM file. The default value is false.
:paramtype use_system_trust_store: JSON
:keyword allow_host_name_cn_mismatch: Specifies whether to require a CA-issued SSL certificate
name to match the host name of the server when connecting over SSL. The default value is false.
:paramtype allow_host_name_cn_mismatch: JSON
:keyword allow_self_signed_server_cert: Specifies whether to allow self-signed certificates
from the server. The default value is false.
:paramtype allow_self_signed_server_cert: JSON
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
"""
super().__init__(
additional_properties=additional_properties,
version=version,
connect_via=connect_via,
description=description,
parameters=parameters,
annotations=annotations,
**kwargs
)
self.type: str = "Hive"
self.host = host
self.port = port
self.server_type = server_type
self.thrift_transport_protocol = thrift_transport_protocol
self.authentication_type = authentication_type
self.service_discovery_mode = service_discovery_mode
self.zoo_keeper_name_space = zoo_keeper_name_space
self.use_native_query = use_native_query
self.username = username
self.password = password
self.http_path = http_path
self.enable_ssl = enable_ssl
self.trusted_cert_path = trusted_cert_path
self.use_system_trust_store = use_system_trust_store
self.allow_host_name_cn_mismatch = allow_host_name_cn_mismatch
self.allow_self_signed_server_cert = allow_self_signed_server_cert
self.encrypted_credential = encrypted_credential
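
# --- Usage sketch (editor's addition). Host, port, and credentials are
# placeholders; SecureString is assumed to be the concrete SecretBase model
# defined elsewhere in this module.
def _example_hive_linked_service() -> "HiveLinkedService":
    return HiveLinkedService(
        host="hive.example.com",
        port=10000,
        authentication_type="UsernameAndPassword",
        username="analyst",
        password=SecureString(value="<placeholder-secret>"),
        enable_ssl=True,
    )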
|
class HiveLinkedService(LinkedService):
'''Hive Server linked service.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of linked service. Required.
:vartype type: str
:ivar version: Version of the linked service.
:vartype version: str
:ivar connect_via: The integration runtime reference.
:vartype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:ivar description: Linked service description.
:vartype description: str
:ivar parameters: Parameters for linked service.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the linked service.
:vartype annotations: list[JSON]
:ivar host: IP address or host name of the Hive server, separated by ';' for multiple hosts
(only when serviceDiscoveryMode is enabled). Required.
:vartype host: JSON
:ivar port: The TCP port that the Hive server uses to listen for client connections.
:vartype port: JSON
:ivar server_type: The type of Hive server. Known values are: "HiveServer1", "HiveServer2", and
"HiveThriftServer".
:vartype server_type: str or ~azure.mgmt.datafactory.models.HiveServerType
:ivar thrift_transport_protocol: The transport protocol to use in the Thrift layer. Known
values are: "Binary", "SASL", and "HTTP ".
:vartype thrift_transport_protocol: str or
~azure.mgmt.datafactory.models.HiveThriftTransportProtocol
:ivar authentication_type: The authentication method used to access the Hive server. Required.
Known values are: "Anonymous", "Username", "UsernameAndPassword", and
"WindowsAzureHDInsightService".
:vartype authentication_type: str or ~azure.mgmt.datafactory.models.HiveAuthenticationType
:ivar service_discovery_mode: true to indicate using the ZooKeeper service, false otherwise.
:vartype service_discovery_mode: JSON
:ivar zoo_keeper_name_space: The namespace on ZooKeeper under which Hive Server 2 nodes are
added.
:vartype zoo_keeper_name_space: JSON
:ivar use_native_query: Specifies whether the driver uses native HiveQL queries, or converts
them into an equivalent form in HiveQL.
:vartype use_native_query: JSON
:ivar username: The user name that you use to access Hive Server.
:vartype username: JSON
:ivar password: The password corresponding to the user name that you provided in the Username
field.
:vartype password: ~azure.mgmt.datafactory.models.SecretBase
:ivar http_path: The partial URL corresponding to the Hive server.
:vartype http_path: JSON
:ivar enable_ssl: Specifies whether the connections to the server are encrypted using SSL. The
default value is false.
:vartype enable_ssl: JSON
:ivar trusted_cert_path: The full path of the .pem file containing trusted CA certificates for
verifying the server when connecting over SSL. This property can only be set when using SSL on
self-hosted IR. The default value is the cacerts.pem file installed with the IR.
:vartype trusted_cert_path: JSON
:ivar use_system_trust_store: Specifies whether to use a CA certificate from the system trust
store or from a specified PEM file. The default value is false.
:vartype use_system_trust_store: JSON
:ivar allow_host_name_cn_mismatch: Specifies whether to require a CA-issued SSL certificate
name to match the host name of the server when connecting over SSL. The default value is false.
:vartype allow_host_name_cn_mismatch: JSON
:ivar allow_self_signed_server_cert: Specifies whether to allow self-signed certificates from
the server. The default value is false.
:vartype allow_self_signed_server_cert: JSON
:ivar encrypted_credential: The encrypted credential used for authentication. Credentials are
encrypted using the integration runtime credential manager. Type: string.
:vartype encrypted_credential: str
'''
def __init__( # pylint: disable=too-many-locals
self,
*,
host: JSON,
authentication_type: Union[str, "_models.HiveAuthenticationType"],
additional_properties: Optional[Dict[str, JSON]] = None,
version: Optional[str] = None,
connect_via: Optional["_models.IntegrationRuntimeReference"] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
port: Optional[JSON] = None,
server_type: Optional[Union[str, "_models.HiveServerType"]] = None,
thrift_transport_protocol: Optional[Union[str, "_models.HiveThriftTransportProtocol"]] = None,
service_discovery_mode: Optional[JSON] = None,
zoo_keeper_name_space: Optional[JSON] = None,
use_native_query: Optional[JSON] = None,
username: Optional[JSON] = None,
password: Optional["_models.SecretBase"] = None,
http_path: Optional[JSON] = None,
enable_ssl: Optional[JSON] = None,
trusted_cert_path: Optional[JSON] = None,
use_system_trust_store: Optional[JSON] = None,
allow_host_name_cn_mismatch: Optional[JSON] = None,
allow_self_signed_server_cert: Optional[JSON] = None,
encrypted_credential: Optional[str] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword version: Version of the linked service.
:paramtype version: str
:keyword connect_via: The integration runtime reference.
:paramtype connect_via: ~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:keyword description: Linked service description.
:paramtype description: str
:keyword parameters: Parameters for linked service.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the linked service.
:paramtype annotations: list[JSON]
:keyword host: IP address or host name of the Hive server, separated by ';' for multiple hosts
(only when serviceDiscoveryMode is enabled). Required.
:paramtype host: JSON
:keyword port: The TCP port that the Hive server uses to listen for client connections.
:paramtype port: JSON
:keyword server_type: The type of Hive server. Known values are: "HiveServer1", "HiveServer2",
and "HiveThriftServer".
:paramtype server_type: str or ~azure.mgmt.datafactory.models.HiveServerType
:keyword thrift_transport_protocol: The transport protocol to use in the Thrift layer. Known
values are: "Binary", "SASL", and "HTTP ".
:paramtype thrift_transport_protocol: str or
~azure.mgmt.datafactory.models.HiveThriftTransportProtocol
:keyword authentication_type: The authentication method used to access the Hive server.
Required. Known values are: "Anonymous", "Username", "UsernameAndPassword", and
"WindowsAzureHDInsightService".
:paramtype authentication_type: str or ~azure.mgmt.datafactory.models.HiveAuthenticationType
:keyword service_discovery_mode: true to indicate using the ZooKeeper service, false otherwise.
:paramtype service_discovery_mode: JSON
:keyword zoo_keeper_name_space: The namespace on ZooKeeper under which Hive Server 2 nodes are
added.
:paramtype zoo_keeper_name_space: JSON
:keyword use_native_query: Specifies whether the driver uses native HiveQL queries, or converts
them into an equivalent form in HiveQL.
:paramtype use_native_query: JSON
:keyword username: The user name that you use to access Hive Server.
:paramtype username: JSON
:keyword password: The password corresponding to the user name that you provided in the
Username field.
:paramtype password: ~azure.mgmt.datafactory.models.SecretBase
:keyword http_path: The partial URL corresponding to the Hive server.
:paramtype http_path: JSON
:keyword enable_ssl: Specifies whether the connections to the server are encrypted using SSL.
The default value is false.
:paramtype enable_ssl: JSON
:keyword trusted_cert_path: The full path of the .pem file containing trusted CA certificates
for verifying the server when connecting over SSL. This property can only be set when using SSL
on self-hosted IR. The default value is the cacerts.pem file installed with the IR.
:paramtype trusted_cert_path: JSON
:keyword use_system_trust_store: Specifies whether to use a CA certificate from the system
trust store or from a specified PEM file. The default value is false.
:paramtype use_system_trust_store: JSON
:keyword allow_host_name_cn_mismatch: Specifies whether to require a CA-issued SSL certificate
name to match the host name of the server when connecting over SSL. The default value is false.
:paramtype allow_host_name_cn_mismatch: JSON
:keyword allow_self_signed_server_cert: Specifies whether to allow self-signed certificates
from the server. The default value is false.
:paramtype allow_self_signed_server_cert: JSON
:keyword encrypted_credential: The encrypted credential used for authentication. Credentials
are encrypted using the integration runtime credential manager. Type: string.
:paramtype encrypted_credential: str
'''
pass
| 2 | 2 | 120 | 0 | 55 | 66 | 1 | 1.54 | 1 | 3 | 0 | 0 | 1 | 18 | 1 | 17 | 225 | 5 | 87 | 49 | 58 | 134 | 23 | 22 | 21 | 1 | 3 | 0 | 1 |
10,894 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.HiveObjectDataset
|
class HiveObjectDataset(Dataset):
"""Hive Server dataset.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of dataset. Required.
:vartype type: str
:ivar description: Dataset description.
:vartype description: str
:ivar structure: Columns that define the structure of the dataset. Type: array (or Expression
with resultType array), itemType: DatasetDataElement.
:vartype structure: JSON
:ivar schema: Columns that define the physical type schema of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetSchemaDataElement.
:vartype schema: JSON
:ivar linked_service_name: Linked service reference. Required.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar parameters: Parameters for dataset.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the Dataset.
:vartype annotations: list[JSON]
:ivar folder: The folder that this Dataset is in. If not specified, Dataset will appear at the
root level.
:vartype folder: ~azure.mgmt.datafactory.models.DatasetFolder
:ivar table_name: This property will be retired. Please consider using schema + table
properties instead.
:vartype table_name: JSON
:ivar table: The table name of the Hive. Type: string (or Expression with resultType string).
:vartype table: JSON
:ivar schema_type_properties_schema: The schema name of the Hive. Type: string (or Expression
with resultType string).
:vartype schema_type_properties_schema: JSON
"""
_validation = {
"type": {"required": True},
"linked_service_name": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"description": {"key": "description", "type": "str"},
"structure": {"key": "structure", "type": "object"},
"schema": {"key": "schema", "type": "object"},
"linked_service_name": {"key": "linkedServiceName", "type": "LinkedServiceReference"},
"parameters": {"key": "parameters", "type": "{ParameterSpecification}"},
"annotations": {"key": "annotations", "type": "[object]"},
"folder": {"key": "folder", "type": "DatasetFolder"},
"table_name": {"key": "typeProperties.tableName", "type": "object"},
"table": {"key": "typeProperties.table", "type": "object"},
"schema_type_properties_schema": {"key": "typeProperties.schema", "type": "object"},
}
def __init__(
self,
*,
linked_service_name: "_models.LinkedServiceReference",
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
structure: Optional[JSON] = None,
schema: Optional[JSON] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
folder: Optional["_models.DatasetFolder"] = None,
table_name: Optional[JSON] = None,
table: Optional[JSON] = None,
schema_type_properties_schema: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword description: Dataset description.
:paramtype description: str
:keyword structure: Columns that define the structure of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetDataElement.
:paramtype structure: JSON
:keyword schema: Columns that define the physical type schema of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetSchemaDataElement.
:paramtype schema: JSON
:keyword linked_service_name: Linked service reference. Required.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword parameters: Parameters for dataset.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the Dataset.
:paramtype annotations: list[JSON]
:keyword folder: The folder that this Dataset is in. If not specified, Dataset will appear at
the root level.
:paramtype folder: ~azure.mgmt.datafactory.models.DatasetFolder
:keyword table_name: This property will be retired. Please consider using schema + table
properties instead.
:paramtype table_name: JSON
:keyword table: The table name of the Hive. Type: string (or Expression with resultType
string).
:paramtype table: JSON
:keyword schema_type_properties_schema: The schema name of the Hive. Type: string (or
Expression with resultType string).
:paramtype schema_type_properties_schema: JSON
"""
super().__init__(
additional_properties=additional_properties,
description=description,
structure=structure,
schema=schema,
linked_service_name=linked_service_name,
parameters=parameters,
annotations=annotations,
folder=folder,
**kwargs
)
self.type: str = "HiveObject"
self.table_name = table_name
self.table = table
self.schema_type_properties_schema = schema_type_properties_schema
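
# --- Usage sketch (editor's addition). LinkedServiceReference is defined
# elsewhere in this module; the reference name, schema, and table below are
# illustrative placeholders.
def _example_hive_object_dataset() -> "HiveObjectDataset":
    return HiveObjectDataset(
        linked_service_name=LinkedServiceReference(
            type="LinkedServiceReference",
            reference_name="HiveLinkedService1",
        ),
        schema_type_properties_schema="default",
        table="trips",
    )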
|
class HiveObjectDataset(Dataset):
'''Hive Server dataset.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Type of dataset. Required.
:vartype type: str
:ivar description: Dataset description.
:vartype description: str
:ivar structure: Columns that define the structure of the dataset. Type: array (or Expression
with resultType array), itemType: DatasetDataElement.
:vartype structure: JSON
:ivar schema: Columns that define the physical type schema of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetSchemaDataElement.
:vartype schema: JSON
:ivar linked_service_name: Linked service reference. Required.
:vartype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:ivar parameters: Parameters for dataset.
:vartype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:ivar annotations: List of tags that can be used for describing the Dataset.
:vartype annotations: list[JSON]
:ivar folder: The folder that this Dataset is in. If not specified, Dataset will appear at the
root level.
:vartype folder: ~azure.mgmt.datafactory.models.DatasetFolder
:ivar table_name: This property will be retired. Please consider using schema + table
properties instead.
:vartype table_name: JSON
:ivar table: The table name of the Hive. Type: string (or Expression with resultType string).
:vartype table: JSON
:ivar schema_type_properties_schema: The schema name of the Hive. Type: string (or Expression
with resultType string).
:vartype schema_type_properties_schema: JSON
'''
def __init__(
self,
*,
linked_service_name: "_models.LinkedServiceReference",
additional_properties: Optional[Dict[str, JSON]] = None,
description: Optional[str] = None,
structure: Optional[JSON] = None,
schema: Optional[JSON] = None,
parameters: Optional[Dict[str, "_models.ParameterSpecification"]] = None,
annotations: Optional[List[JSON]] = None,
folder: Optional["_models.DatasetFolder"] = None,
table_name: Optional[JSON] = None,
table: Optional[JSON] = None,
schema_type_properties_schema: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword description: Dataset description.
:paramtype description: str
:keyword structure: Columns that define the structure of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetDataElement.
:paramtype structure: JSON
:keyword schema: Columns that define the physical type schema of the dataset. Type: array (or
Expression with resultType array), itemType: DatasetSchemaDataElement.
:paramtype schema: JSON
:keyword linked_service_name: Linked service reference. Required.
:paramtype linked_service_name: ~azure.mgmt.datafactory.models.LinkedServiceReference
:keyword parameters: Parameters for dataset.
:paramtype parameters: dict[str, ~azure.mgmt.datafactory.models.ParameterSpecification]
:keyword annotations: List of tags that can be used for describing the Dataset.
:paramtype annotations: list[JSON]
:keyword folder: The folder that this Dataset is in. If not specified, Dataset will appear at
the root level.
:paramtype folder: ~azure.mgmt.datafactory.models.DatasetFolder
:keyword table_name: This property will be retired. Please consider using schema + table
properties instead.
:paramtype table_name: JSON
:keyword table: The table name of the Hive. Type: string (or Expression with resultType
string).
:paramtype table: JSON
:keyword schema_type_properties_schema: The schema name of the Hive. Type: string (or
Expression with resultType string).
:paramtype schema_type_properties_schema: JSON
'''
pass
| 2 | 2 | 62 | 0 | 31 | 31 | 1 | 1.28 | 1 | 3 | 0 | 0 | 1 | 4 | 1 | 17 | 119 | 5 | 50 | 23 | 33 | 64 | 9 | 8 | 7 | 1 | 3 | 0 | 1 |
10,895 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.HiveSource
|
class HiveSource(TabularSource):
"""A copy activity Hive Server source.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Copy source type. Required.
:vartype type: str
:ivar source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:vartype source_retry_count: JSON
:ivar source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype source_retry_wait: JSON
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar query_timeout: Query timeout. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype query_timeout: JSON
:ivar additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:vartype additional_columns: JSON
:ivar query: A query to retrieve data from source. Type: string (or Expression with resultType
string).
:vartype query: JSON
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"source_retry_count": {"key": "sourceRetryCount", "type": "object"},
"source_retry_wait": {"key": "sourceRetryWait", "type": "object"},
"max_concurrent_connections": {"key": "maxConcurrentConnections", "type": "object"},
"disable_metrics_collection": {"key": "disableMetricsCollection", "type": "object"},
"query_timeout": {"key": "queryTimeout", "type": "object"},
"additional_columns": {"key": "additionalColumns", "type": "object"},
"query": {"key": "query", "type": "object"},
}
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
source_retry_count: Optional[JSON] = None,
source_retry_wait: Optional[JSON] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
query_timeout: Optional[JSON] = None,
additional_columns: Optional[JSON] = None,
query: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:paramtype source_retry_count: JSON
:keyword source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype source_retry_wait: JSON
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword query_timeout: Query timeout. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype query_timeout: JSON
:keyword additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:paramtype additional_columns: JSON
:keyword query: A query to retrieve data from source. Type: string (or Expression with
resultType string).
:paramtype query: JSON
"""
super().__init__(
additional_properties=additional_properties,
source_retry_count=source_retry_count,
source_retry_wait=source_retry_wait,
max_concurrent_connections=max_concurrent_connections,
disable_metrics_collection=disable_metrics_collection,
query_timeout=query_timeout,
additional_columns=additional_columns,
**kwargs
)
self.type: str = "HiveSource"
self.query = query
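
# --- Usage sketch (editor's addition; the query text and timeout are
# illustrative). The timeout string follows the documented
# ((\d+).)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])) pattern.
def _example_hive_source() -> "HiveSource":
    return HiveSource(
        query="SELECT * FROM default.trips",
        query_timeout="02:00:00",
    )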
|
class HiveSource(TabularSource):
'''A copy activity Hive Server source.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Copy source type. Required.
:vartype type: str
:ivar source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:vartype source_retry_count: JSON
:ivar source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype source_retry_wait: JSON
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar query_timeout: Query timeout. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype query_timeout: JSON
:ivar additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:vartype additional_columns: JSON
:ivar query: A query to retrieve data from source. Type: string (or Expression with resultType
string).
:vartype query: JSON
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
source_retry_count: Optional[JSON] = None,
source_retry_wait: Optional[JSON] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
query_timeout: Optional[JSON] = None,
additional_columns: Optional[JSON] = None,
query: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:paramtype source_retry_count: JSON
:keyword source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype source_retry_wait: JSON
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword query_timeout: Query timeout. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype query_timeout: JSON
:keyword additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:paramtype additional_columns: JSON
:keyword query: A query to retrieve data from source. Type: string (or Expression with
resultType string).
:paramtype query: JSON
'''
pass
| 2 | 2 | 51 | 0 | 25 | 26 | 1 | 1.38 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 18 | 100 | 5 | 40 | 18 | 26 | 55 | 7 | 6 | 5 | 1 | 4 | 0 | 1 |
10,896 |
Azure/azure-cli-extensions
|
src/securityinsight/azext_sentinel/aaz/latest/sentinel/alert_rule/template/_show.py
|
azext_sentinel.aaz.latest.sentinel.alert_rule.template._show.Show
|
class Show(AAZCommand):
"""Get the alert rule template.
"""
_aaz_info = {
"version": "2022-06-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.operationalinsights/workspaces/{}/providers/microsoft.securityinsights/alertruletemplates/{}", "2022-06-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.alert_rule_template_id = AAZStrArg(
options=["-n", "--name", "--alert-rule-template-id"],
help="Alert rule template ID",
required=True,
id_part="child_name_1",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.workspace_name = AAZStrArg(
options=["-w", "--workspace-name"],
help="The name of the workspace.",
required=True,
is_experimental=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.AlertRuleTemplatesGet(ctx=self.ctx)()
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
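
    # Example invocation (editor's addition). The command group name is
    # inferred from this file's path and may differ in the shipped extension:
    #   az sentinel alert-rule template show \
    #       --resource-group myRG --workspace-name myWS --name <template-id>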
class AlertRuleTemplatesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/alertRuleTemplates/{alertRuleTemplateId}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"alertRuleTemplateId", self.ctx.args.alert_rule_template_id,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"workspaceName", self.ctx.args.workspace_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-06-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.kind = AAZStrType(
flags={"required": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
system_data = cls._schema_on_200.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
flags={"read_only": True},
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
flags={"read_only": True},
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
flags={"read_only": True},
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
flags={"read_only": True},
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
flags={"read_only": True},
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
flags={"read_only": True},
)
disc_fusion = cls._schema_on_200.discriminate_by("kind", "Fusion")
disc_fusion.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.discriminate_by("kind", "Fusion").properties
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"read_only": True},
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severity = AAZStrType(
flags={"required": True},
)
properties.source_settings = AAZListType(
serialized_name="sourceSettings",
)
properties.status = AAZStrType(
flags={"required": True},
)
properties.tactics = AAZListType()
properties.techniques = AAZListType()
required_data_connectors = cls._schema_on_200.discriminate_by("kind", "Fusion").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
source_settings = cls._schema_on_200.discriminate_by("kind", "Fusion").properties.source_settings
source_settings.Element = AAZObjectType()
_element = cls._schema_on_200.discriminate_by("kind", "Fusion").properties.source_settings.Element
_element.source_name = AAZStrType(
serialized_name="sourceName",
flags={"required": True},
)
_element.source_sub_types = AAZListType(
serialized_name="sourceSubTypes",
)
source_sub_types = cls._schema_on_200.discriminate_by("kind", "Fusion").properties.source_settings.Element.source_sub_types
source_sub_types.Element = AAZObjectType()
_element = cls._schema_on_200.discriminate_by("kind", "Fusion").properties.source_settings.Element.source_sub_types.Element
_element.severity_filter = AAZObjectType(
serialized_name="severityFilter",
flags={"required": True},
)
_element.source_sub_type_display_name = AAZStrType(
serialized_name="sourceSubTypeDisplayName",
flags={"read_only": True},
)
_element.source_sub_type_name = AAZStrType(
serialized_name="sourceSubTypeName",
flags={"required": True},
)
severity_filter = cls._schema_on_200.discriminate_by("kind", "Fusion").properties.source_settings.Element.source_sub_types.Element.severity_filter
severity_filter.is_supported = AAZBoolType(
serialized_name="isSupported",
flags={"required": True},
)
severity_filter.severity_filters = AAZListType(
serialized_name="severityFilters",
)
severity_filters = cls._schema_on_200.discriminate_by("kind", "Fusion").properties.source_settings.Element.source_sub_types.Element.severity_filter.severity_filters
severity_filters.Element = AAZStrType()
tactics = cls._schema_on_200.discriminate_by("kind", "Fusion").properties.tactics
tactics.Element = AAZStrType()
techniques = cls._schema_on_200.discriminate_by("kind", "Fusion").properties.techniques
techniques.Element = AAZStrType()
disc_ml_behavior_analytics = cls._schema_on_200.discriminate_by("kind", "MLBehaviorAnalytics")
disc_ml_behavior_analytics.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.discriminate_by("kind", "MLBehaviorAnalytics").properties
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"read_only": True},
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severity = AAZStrType(
flags={"required": True},
)
properties.status = AAZStrType(
flags={"required": True},
)
properties.tactics = AAZListType()
properties.techniques = AAZListType()
required_data_connectors = cls._schema_on_200.discriminate_by("kind", "MLBehaviorAnalytics").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
tactics = cls._schema_on_200.discriminate_by("kind", "MLBehaviorAnalytics").properties.tactics
tactics.Element = AAZStrType()
techniques = cls._schema_on_200.discriminate_by("kind", "MLBehaviorAnalytics").properties.techniques
techniques.Element = AAZStrType()
disc_microsoft_security_incident_creation = cls._schema_on_200.discriminate_by("kind", "MicrosoftSecurityIncidentCreation")
disc_microsoft_security_incident_creation.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.discriminate_by("kind", "MicrosoftSecurityIncidentCreation").properties
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"required": True, "read_only": True},
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.display_names_exclude_filter = AAZListType(
serialized_name="displayNamesExcludeFilter",
)
properties.display_names_filter = AAZListType(
serialized_name="displayNamesFilter",
)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.product_filter = AAZStrType(
serialized_name="productFilter",
flags={"required": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severities_filter = AAZListType(
serialized_name="severitiesFilter",
)
properties.status = AAZStrType(
flags={"required": True},
)
display_names_exclude_filter = cls._schema_on_200.discriminate_by("kind", "MicrosoftSecurityIncidentCreation").properties.display_names_exclude_filter
display_names_exclude_filter.Element = AAZStrType()
display_names_filter = cls._schema_on_200.discriminate_by("kind", "MicrosoftSecurityIncidentCreation").properties.display_names_filter
display_names_filter.Element = AAZStrType()
required_data_connectors = cls._schema_on_200.discriminate_by("kind", "MicrosoftSecurityIncidentCreation").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
severities_filter = cls._schema_on_200.discriminate_by("kind", "MicrosoftSecurityIncidentCreation").properties.severities_filter
severities_filter.Element = AAZStrType()
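# The NRT and Scheduled branches additionally carry the detection query, entity
# mappings, custom details, and alert detail overrides.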
disc_nrt = cls._schema_on_200.discriminate_by("kind", "NRT")
disc_nrt.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.discriminate_by("kind", "NRT").properties
properties.alert_details_override = AAZObjectType(
serialized_name="alertDetailsOverride",
)
_build_schema_alert_details_override_read(properties.alert_details_override)
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"read_only": True},
)
properties.custom_details = AAZDictType(
serialized_name="customDetails",
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.entity_mappings = AAZListType(
serialized_name="entityMappings",
)
_build_schema_entity_mappings_read(properties.entity_mappings)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.query = AAZStrType(
flags={"required": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severity = AAZStrType(
flags={"required": True},
)
properties.status = AAZStrType(
flags={"required": True},
)
properties.tactics = AAZListType()
properties.techniques = AAZListType()
properties.version = AAZStrType(
flags={"required": True},
)
custom_details = cls._schema_on_200.discriminate_by("kind", "NRT").properties.custom_details
custom_details.Element = AAZStrType()
required_data_connectors = cls._schema_on_200.discriminate_by("kind", "NRT").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
tactics = cls._schema_on_200.discriminate_by("kind", "NRT").properties.tactics
tactics.Element = AAZStrType()
techniques = cls._schema_on_200.discriminate_by("kind", "NRT").properties.techniques
techniques.Element = AAZStrType()
disc_scheduled = cls._schema_on_200.discriminate_by("kind", "Scheduled")
disc_scheduled.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.discriminate_by("kind", "Scheduled").properties
properties.alert_details_override = AAZObjectType(
serialized_name="alertDetailsOverride",
)
_build_schema_alert_details_override_read(properties.alert_details_override)
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"read_only": True},
)
properties.custom_details = AAZDictType(
serialized_name="customDetails",
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.entity_mappings = AAZListType(
serialized_name="entityMappings",
)
_build_schema_entity_mappings_read(properties.entity_mappings)
properties.event_grouping_settings = AAZObjectType(
serialized_name="eventGroupingSettings",
)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.query = AAZStrType(
flags={"required": True},
)
properties.query_frequency = AAZStrType(
serialized_name="queryFrequency",
flags={"required": True},
)
properties.query_period = AAZStrType(
serialized_name="queryPeriod",
flags={"required": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severity = AAZStrType(
flags={"required": True},
)
properties.status = AAZStrType(
flags={"required": True},
)
properties.tactics = AAZListType()
properties.techniques = AAZListType()
properties.trigger_operator = AAZStrType(
serialized_name="triggerOperator",
flags={"required": True},
)
properties.trigger_threshold = AAZIntType(
serialized_name="triggerThreshold",
flags={"required": True},
)
properties.version = AAZStrType(
flags={"required": True},
)
custom_details = cls._schema_on_200.discriminate_by("kind", "Scheduled").properties.custom_details
custom_details.Element = AAZStrType()
event_grouping_settings = cls._schema_on_200.discriminate_by("kind", "Scheduled").properties.event_grouping_settings
event_grouping_settings.aggregation_kind = AAZStrType(
serialized_name="aggregationKind",
)
required_data_connectors = cls._schema_on_200.discriminate_by("kind", "Scheduled").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
tactics = cls._schema_on_200.discriminate_by("kind", "Scheduled").properties.tactics
tactics.Element = AAZStrType()
techniques = cls._schema_on_200.discriminate_by("kind", "Scheduled").properties.techniques
techniques.Element = AAZStrType()
disc_threat_intelligence = cls._schema_on_200.discriminate_by("kind", "ThreatIntelligence")
disc_threat_intelligence.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.discriminate_by("kind", "ThreatIntelligence").properties
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"read_only": True},
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severity = AAZStrType(
flags={"required": True},
)
properties.status = AAZStrType(
flags={"required": True},
)
properties.tactics = AAZListType()
properties.techniques = AAZListType()
required_data_connectors = cls._schema_on_200.discriminate_by("kind", "ThreatIntelligence").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
tactics = cls._schema_on_200.discriminate_by("kind", "ThreatIntelligence").properties.tactics
tactics.Element = AAZStrType()
techniques = cls._schema_on_200.discriminate_by("kind", "ThreatIntelligence").properties.techniques
techniques.Element = AAZStrType()
return cls._schema_on_200
|
class Show(AAZCommand):
'''Get the alert rule template.
'''
def _handler(self, command_args):
pass
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
pass
def _execute_operations(self):
pass
def _output(self, *args, **kwargs):
pass
class AlertRuleTemplatesGet(AAZHttpOperation):
def __call__(self, *args, **kwargs):
pass
@property
def url(self):
pass
@property
def method(self):
pass
@property
def error_format(self):
pass
@property
def url_parameters(self):
pass
@property
def query_parameters(self):
pass
@property
def header_parameters(self):
pass
def on_200(self, session):
pass
@classmethod
def _build_schema_on_200(cls):
pass
| 23 | 1 | 42 | 4 | 38 | 0 | 1 | 0.01 | 1 | 2 | 1 | 0 | 3 | 0 | 4 | 4 | 582 | 64 | 515 | 57 | 492 | 3 | 231 | 49 | 216 | 2 | 1 | 1 | 16 |
10,897 |
Azure/azure-cli-extensions
|
src/securityinsight/azext_sentinel/aaz/latest/sentinel/alert_rule/template/_list.py
|
azext_sentinel.aaz.latest.sentinel.alert_rule.template._list.List
|
class List(AAZCommand):
"""Get all alert rule templates.
"""
_aaz_info = {
"version": "2022-06-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.operationalinsights/workspaces/{}/providers/microsoft.securityinsights/alertruletemplates", "2022-06-01-preview"],
]
}
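# Paged list command: _handler returns a paging generator wired to _execute_operations
# and _output. On the CLI this is typically surfaced as
# "az sentinel alert-rule template list -g <rg> -w <workspace>"
# (command path inferred from the module location).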
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.workspace_name = AAZStrArg(
options=["-w", "--workspace-name"],
help="The name of the workspace.",
required=True,
is_experimental=True,
)
return cls._args_schema
def _execute_operations(self):
self.AlertRuleTemplatesList(ctx=self.ctx)()
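# build_paging consumes the (page_items, next_link) tuple produced here to fetch
# subsequent pages.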
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class AlertRuleTemplatesList(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/alertRuleTemplates",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"workspaceName", self.ctx.args.workspace_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-06-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
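# The response schema below is built lazily on first use and cached on the class.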
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
flags={"read_only": True},
)
_schema_on_200.value = AAZListType(
flags={"required": True},
)
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.kind = AAZStrType(
flags={"required": True},
)
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
system_data = cls._schema_on_200.value.Element.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
flags={"read_only": True},
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
flags={"read_only": True},
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
flags={"read_only": True},
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
flags={"read_only": True},
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
flags={"read_only": True},
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
flags={"read_only": True},
)
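# As in the "show" schema above, value.Element is discriminated on "kind";
# the branches mirror the per-kind template properties.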
disc_fusion = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion")
disc_fusion.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion").properties
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"read_only": True},
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severity = AAZStrType(
flags={"required": True},
)
properties.source_settings = AAZListType(
serialized_name="sourceSettings",
)
properties.status = AAZStrType(
flags={"required": True},
)
properties.tactics = AAZListType()
properties.techniques = AAZListType()
required_data_connectors = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
source_settings = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion").properties.source_settings
source_settings.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion").properties.source_settings.Element
_element.source_name = AAZStrType(
serialized_name="sourceName",
flags={"required": True},
)
_element.source_sub_types = AAZListType(
serialized_name="sourceSubTypes",
)
source_sub_types = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion").properties.source_settings.Element.source_sub_types
source_sub_types.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion").properties.source_settings.Element.source_sub_types.Element
_element.severity_filter = AAZObjectType(
serialized_name="severityFilter",
flags={"required": True},
)
_element.source_sub_type_display_name = AAZStrType(
serialized_name="sourceSubTypeDisplayName",
flags={"read_only": True},
)
_element.source_sub_type_name = AAZStrType(
serialized_name="sourceSubTypeName",
flags={"required": True},
)
severity_filter = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion").properties.source_settings.Element.source_sub_types.Element.severity_filter
severity_filter.is_supported = AAZBoolType(
serialized_name="isSupported",
flags={"required": True},
)
severity_filter.severity_filters = AAZListType(
serialized_name="severityFilters",
)
severity_filters = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion").properties.source_settings.Element.source_sub_types.Element.severity_filter.severity_filters
severity_filters.Element = AAZStrType()
tactics = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion").properties.tactics
tactics.Element = AAZStrType()
techniques = cls._schema_on_200.value.Element.discriminate_by("kind", "Fusion").properties.techniques
techniques.Element = AAZStrType()
disc_ml_behavior_analytics = cls._schema_on_200.value.Element.discriminate_by("kind", "MLBehaviorAnalytics")
disc_ml_behavior_analytics.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.value.Element.discriminate_by("kind", "MLBehaviorAnalytics").properties
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"read_only": True},
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severity = AAZStrType(
flags={"required": True},
)
properties.status = AAZStrType(
flags={"required": True},
)
properties.tactics = AAZListType()
properties.techniques = AAZListType()
required_data_connectors = cls._schema_on_200.value.Element.discriminate_by("kind", "MLBehaviorAnalytics").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
tactics = cls._schema_on_200.value.Element.discriminate_by("kind", "MLBehaviorAnalytics").properties.tactics
tactics.Element = AAZStrType()
techniques = cls._schema_on_200.value.Element.discriminate_by("kind", "MLBehaviorAnalytics").properties.techniques
techniques.Element = AAZStrType()
disc_microsoft_security_incident_creation = cls._schema_on_200.value.Element.discriminate_by("kind", "MicrosoftSecurityIncidentCreation")
disc_microsoft_security_incident_creation.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.value.Element.discriminate_by("kind", "MicrosoftSecurityIncidentCreation").properties
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"required": True, "read_only": True},
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.display_names_exclude_filter = AAZListType(
serialized_name="displayNamesExcludeFilter",
)
properties.display_names_filter = AAZListType(
serialized_name="displayNamesFilter",
)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.product_filter = AAZStrType(
serialized_name="productFilter",
flags={"required": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severities_filter = AAZListType(
serialized_name="severitiesFilter",
)
properties.status = AAZStrType(
flags={"required": True},
)
display_names_exclude_filter = cls._schema_on_200.value.Element.discriminate_by("kind", "MicrosoftSecurityIncidentCreation").properties.display_names_exclude_filter
display_names_exclude_filter.Element = AAZStrType()
display_names_filter = cls._schema_on_200.value.Element.discriminate_by("kind", "MicrosoftSecurityIncidentCreation").properties.display_names_filter
display_names_filter.Element = AAZStrType()
required_data_connectors = cls._schema_on_200.value.Element.discriminate_by("kind", "MicrosoftSecurityIncidentCreation").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
severities_filter = cls._schema_on_200.value.Element.discriminate_by("kind", "MicrosoftSecurityIncidentCreation").properties.severities_filter
severities_filter.Element = AAZStrType()
disc_nrt = cls._schema_on_200.value.Element.discriminate_by("kind", "NRT")
disc_nrt.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.value.Element.discriminate_by("kind", "NRT").properties
properties.alert_details_override = AAZObjectType(
serialized_name="alertDetailsOverride",
)
_build_schema_alert_details_override_read(properties.alert_details_override)
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"read_only": True},
)
properties.custom_details = AAZDictType(
serialized_name="customDetails",
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.entity_mappings = AAZListType(
serialized_name="entityMappings",
)
_build_schema_entity_mappings_read(properties.entity_mappings)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.query = AAZStrType(
flags={"required": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severity = AAZStrType(
flags={"required": True},
)
properties.status = AAZStrType(
flags={"required": True},
)
properties.tactics = AAZListType()
properties.techniques = AAZListType()
properties.version = AAZStrType(
flags={"required": True},
)
custom_details = cls._schema_on_200.value.Element.discriminate_by("kind", "NRT").properties.custom_details
custom_details.Element = AAZStrType()
required_data_connectors = cls._schema_on_200.value.Element.discriminate_by("kind", "NRT").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
tactics = cls._schema_on_200.value.Element.discriminate_by("kind", "NRT").properties.tactics
tactics.Element = AAZStrType()
techniques = cls._schema_on_200.value.Element.discriminate_by("kind", "NRT").properties.techniques
techniques.Element = AAZStrType()
disc_scheduled = cls._schema_on_200.value.Element.discriminate_by("kind", "Scheduled")
disc_scheduled.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.value.Element.discriminate_by("kind", "Scheduled").properties
properties.alert_details_override = AAZObjectType(
serialized_name="alertDetailsOverride",
)
_build_schema_alert_details_override_read(properties.alert_details_override)
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"read_only": True},
)
properties.custom_details = AAZDictType(
serialized_name="customDetails",
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.entity_mappings = AAZListType(
serialized_name="entityMappings",
)
_build_schema_entity_mappings_read(properties.entity_mappings)
properties.event_grouping_settings = AAZObjectType(
serialized_name="eventGroupingSettings",
)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.query = AAZStrType(
flags={"required": True},
)
properties.query_frequency = AAZStrType(
serialized_name="queryFrequency",
flags={"required": True},
)
properties.query_period = AAZStrType(
serialized_name="queryPeriod",
flags={"required": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severity = AAZStrType(
flags={"required": True},
)
properties.status = AAZStrType(
flags={"required": True},
)
properties.tactics = AAZListType()
properties.techniques = AAZListType()
properties.trigger_operator = AAZStrType(
serialized_name="triggerOperator",
flags={"required": True},
)
properties.trigger_threshold = AAZIntType(
serialized_name="triggerThreshold",
flags={"required": True},
)
properties.version = AAZStrType(
flags={"required": True},
)
custom_details = cls._schema_on_200.value.Element.discriminate_by("kind", "Scheduled").properties.custom_details
custom_details.Element = AAZStrType()
event_grouping_settings = cls._schema_on_200.value.Element.discriminate_by("kind", "Scheduled").properties.event_grouping_settings
event_grouping_settings.aggregation_kind = AAZStrType(
serialized_name="aggregationKind",
)
required_data_connectors = cls._schema_on_200.value.Element.discriminate_by("kind", "Scheduled").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
tactics = cls._schema_on_200.value.Element.discriminate_by("kind", "Scheduled").properties.tactics
tactics.Element = AAZStrType()
techniques = cls._schema_on_200.value.Element.discriminate_by("kind", "Scheduled").properties.techniques
techniques.Element = AAZStrType()
disc_threat_intelligence = cls._schema_on_200.value.Element.discriminate_by("kind", "ThreatIntelligence")
disc_threat_intelligence.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.value.Element.discriminate_by("kind", "ThreatIntelligence").properties
properties.alert_rules_created_by_template_count = AAZIntType(
serialized_name="alertRulesCreatedByTemplateCount",
flags={"required": True},
)
properties.created_date_utc = AAZStrType(
serialized_name="createdDateUTC",
flags={"read_only": True},
)
properties.description = AAZStrType(
flags={"required": True},
)
properties.display_name = AAZStrType(
serialized_name="displayName",
flags={"required": True},
)
properties.last_updated_date_utc = AAZStrType(
serialized_name="lastUpdatedDateUTC",
flags={"read_only": True},
)
properties.required_data_connectors = AAZListType(
serialized_name="requiredDataConnectors",
)
properties.severity = AAZStrType(
flags={"required": True},
)
properties.status = AAZStrType(
flags={"required": True},
)
properties.tactics = AAZListType()
properties.techniques = AAZListType()
required_data_connectors = cls._schema_on_200.value.Element.discriminate_by("kind", "ThreatIntelligence").properties.required_data_connectors
required_data_connectors.Element = AAZObjectType()
_build_schema_alert_rule_template_data_source_read(required_data_connectors.Element)
tactics = cls._schema_on_200.value.Element.discriminate_by("kind", "ThreatIntelligence").properties.tactics
tactics.Element = AAZStrType()
techniques = cls._schema_on_200.value.Element.discriminate_by("kind", "ThreatIntelligence").properties.techniques
techniques.Element = AAZStrType()
return cls._schema_on_200
|
class List(AAZCommand):
'''Get all alert rule templates.
'''
def _handler(self, command_args):
pass
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
pass
def _execute_operations(self):
pass
def _output(self, *args, **kwargs):
pass
class AlertRuleTemplatesList(AAZHttpOperation):
def __call__(self, *args, **kwargs):
pass
@property
def url(self):
pass
@property
def method(self):
pass
@property
def error_format(self):
pass
@property
def url_parameters(self):
pass
@property
def query_parameters(self):
pass
@property
def header_parameters(self):
pass
def on_200(self, session):
pass
@classmethod
def _build_schema_on_200(cls):
pass
| 23 | 1 | 42 | 4 | 38 | 0 | 1 | 0.01 | 1 | 2 | 1 | 0 | 3 | 0 | 4 | 4 | 583 | 66 | 514 | 59 | 491 | 3 | 235 | 51 | 220 | 2 | 1 | 1 | 16 |
10,898 |
Azure/azure-cli-extensions
|
src/securityinsight/azext_sentinel/aaz/latest/sentinel/alert_rule/template/__cmd_group.py
|
azext_sentinel.aaz.latest.sentinel.alert_rule.template.__cmd_group.__CMDGroup
|
class __CMDGroup(AAZCommandGroup):
"""Manage alert rule template with sentinel.
"""
pass
|
class __CMDGroup(AAZCommandGroup):
'''Manage alert rule templates with Sentinel.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 2 | 1 | 1 | 2 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
10,899 |
Azure/azure-cli-extensions
|
src/datafactory/azext_datafactory/vendored_sdks/datafactory/models/_models_py3.py
|
azext_datafactory.vendored_sdks.datafactory.models._models_py3.HBaseSource
|
class HBaseSource(TabularSource):
"""A copy activity HBase server source.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Copy source type. Required.
:vartype type: str
:ivar source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:vartype source_retry_count: JSON
:ivar source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype source_retry_wait: JSON
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar query_timeout: Query timeout. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype query_timeout: JSON
:ivar additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:vartype additional_columns: JSON
:ivar query: A query to retrieve data from source. Type: string (or Expression with resultType
string).
:vartype query: JSON
"""
_validation = {
"type": {"required": True},
}
_attribute_map = {
"additional_properties": {"key": "", "type": "{object}"},
"type": {"key": "type", "type": "str"},
"source_retry_count": {"key": "sourceRetryCount", "type": "object"},
"source_retry_wait": {"key": "sourceRetryWait", "type": "object"},
"max_concurrent_connections": {"key": "maxConcurrentConnections", "type": "object"},
"disable_metrics_collection": {"key": "disableMetricsCollection", "type": "object"},
"query_timeout": {"key": "queryTimeout", "type": "object"},
"additional_columns": {"key": "additionalColumns", "type": "object"},
"query": {"key": "query", "type": "object"},
}
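# The empty "key" maps otherwise-unmatched payload properties into
# additional_properties during (de)serialization.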
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
source_retry_count: Optional[JSON] = None,
source_retry_wait: Optional[JSON] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
query_timeout: Optional[JSON] = None,
additional_columns: Optional[JSON] = None,
query: Optional[JSON] = None,
**kwargs: Any
) -> None:
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:paramtype source_retry_count: JSON
:keyword source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype source_retry_wait: JSON
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword query_timeout: Query timeout. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype query_timeout: JSON
:keyword additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:paramtype additional_columns: JSON
:keyword query: A query to retrieve data from source. Type: string (or Expression with
resultType string).
:paramtype query: JSON
"""
super().__init__(
additional_properties=additional_properties,
source_retry_count=source_retry_count,
source_retry_wait=source_retry_wait,
max_concurrent_connections=max_concurrent_connections,
disable_metrics_collection=disable_metrics_collection,
query_timeout=query_timeout,
additional_columns=additional_columns,
**kwargs
)
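# Fixed discriminator value identifying this polymorphic copy source subtype.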
self.type: str = "HBaseSource"
self.query = query
|
class HBaseSource(TabularSource):
'''A copy activity HBase server source.
All required parameters must be populated in order to send to server.
:ivar additional_properties: Unmatched properties from the message are deserialized to this
collection.
:vartype additional_properties: dict[str, JSON]
:ivar type: Copy source type. Required.
:vartype type: str
:ivar source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:vartype source_retry_count: JSON
:ivar source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype source_retry_wait: JSON
:ivar max_concurrent_connections: The maximum concurrent connection count for the source data
store. Type: integer (or Expression with resultType integer).
:vartype max_concurrent_connections: JSON
:ivar disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:vartype disable_metrics_collection: JSON
:ivar query_timeout: Query timeout. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:vartype query_timeout: JSON
:ivar additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:vartype additional_columns: JSON
:ivar query: A query to retrieve data from source. Type: string (or Expression with resultType
string).
:vartype query: JSON
'''
def __init__(
self,
*,
additional_properties: Optional[Dict[str, JSON]] = None,
source_retry_count: Optional[JSON] = None,
source_retry_wait: Optional[JSON] = None,
max_concurrent_connections: Optional[JSON] = None,
disable_metrics_collection: Optional[JSON] = None,
query_timeout: Optional[JSON] = None,
additional_columns: Optional[JSON] = None,
query: Optional[JSON] = None,
**kwargs: Any
) -> None:
'''
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, JSON]
:keyword source_retry_count: Source retry count. Type: integer (or Expression with resultType
integer).
:paramtype source_retry_count: JSON
:keyword source_retry_wait: Source retry wait. Type: string (or Expression with resultType
string), pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype source_retry_wait: JSON
:keyword max_concurrent_connections: The maximum concurrent connection count for the source
data store. Type: integer (or Expression with resultType integer).
:paramtype max_concurrent_connections: JSON
:keyword disable_metrics_collection: If true, disable data store metrics collection. Default is
false. Type: boolean (or Expression with resultType boolean).
:paramtype disable_metrics_collection: JSON
:keyword query_timeout: Query timeout. Type: string (or Expression with resultType string),
pattern: ((\\d+).)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:paramtype query_timeout: JSON
:keyword additional_columns: Specifies the additional columns to be added to source data. Type:
array of objects(AdditionalColumns) (or Expression with resultType array of objects).
:paramtype additional_columns: JSON
:keyword query: A query to retrieve data from source. Type: string (or Expression with
resultType string).
:paramtype query: JSON
'''
pass
| 2 | 2 | 51 | 0 | 25 | 26 | 1 | 1.38 | 1 | 3 | 0 | 0 | 1 | 2 | 1 | 18 | 100 | 5 | 40 | 18 | 26 | 55 | 7 | 6 | 5 | 1 | 4 | 0 | 1 |