Dataset columns (schema header, one record per function/class):
index      int64     values 0 to 731k
package    string    lengths 2 to 98
name       string    lengths 1 to 76
docstring  string    lengths 0 to 281k
code       string    lengths 4 to 1.07M
signature  string    lengths 2 to 42.8k
720,040
ibm_platform_services.usage_reports_v4
update_reports_snapshot_config
Update the snapshot configuration. Updates the configuration of the billing report snapshots set up by the customer for the given account ID. :param str account_id: Account ID for which the billing report snapshot is configured. :param str interval: (optional) Frequency of taking the snapshot of the billing reports. :param str cos_bucket: (optional) The name of the COS bucket in which to store the snapshot of the billing reports. :param str cos_location: (optional) Region of the COS instance. :param str cos_reports_folder: (optional) The billing reports root folder in which to store the billing report snapshots. :param List[str] report_types: (optional) The types of billing reports to take snapshots of. Possible values are [account_summary, enterprise_summary, account_resource_instance_usage]. :param str versioning: (optional) Whether a new version of the report is created or the existing report version is overwritten with every update. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `SnapshotConfig` object
def update_reports_snapshot_config( self, account_id: str, *, interval: Optional[str] = None, cos_bucket: Optional[str] = None, cos_location: Optional[str] = None, cos_reports_folder: Optional[str] = None, report_types: Optional[List[str]] = None, versioning: Optional[str] = None, **kwargs, ) -> DetailedResponse: """ Update the snapshot configuration. Updates the configuration of snapshot of the billing reports setup by the customer for the given Account Id. :param str account_id: Account ID for which billing report snapshot is configured. :param str interval: (optional) Frequency of taking the snapshot of the billing reports. :param str cos_bucket: (optional) The name of the COS bucket to store the snapshot of the billing reports. :param str cos_location: (optional) Region of the COS instance. :param str cos_reports_folder: (optional) The billing reports root folder to store the billing reports snapshots. :param List[str] report_types: (optional) The type of billing reports to take snapshot of. Possible values are [account_summary, enterprise_summary, account_resource_instance_usage]. :param str versioning: (optional) A new version of report is created or the existing report version is overwritten with every update. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `SnapshotConfig` object """ if account_id is None: raise ValueError('account_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V4', operation_id='update_reports_snapshot_config', ) headers.update(sdk_headers) data = { 'account_id': account_id, 'interval': interval, 'cos_bucket': cos_bucket, 'cos_location': cos_location, 'cos_reports_folder': cos_reports_folder, 'report_types': report_types, 'versioning': versioning, } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] headers['Accept'] = 'application/json' url = '/v1/billing-reports-snapshot-config' request = self.prepare_request( method='PATCH', url=url, headers=headers, data=data, ) response = self.send(request, **kwargs) return response
(self, account_id: str, *, interval: Optional[str] = None, cos_bucket: Optional[str] = None, cos_location: Optional[str] = None, cos_reports_folder: Optional[str] = None, report_types: Optional[List[str]] = None, versioning: Optional[str] = None, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
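A minimal usage sketch for this operation (not part of the dataset record); the API key, account ID, and bucket details are placeholders, and the 'daily' interval value is an assumption:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_platform_services import UsageReportsV4

# Construct the client with an IAM API key (placeholder credentials).
usage_reports = UsageReportsV4(authenticator=IAMAuthenticator('<apikey>'))
response = usage_reports.update_reports_snapshot_config(
    account_id='<account_id>',
    interval='daily',            # assumed frequency value
    cos_bucket='<bucket_name>',
    cos_location='us-south',
)
snapshot_config = response.get_result()  # dict representing a SnapshotConfig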
720,041
ibm_platform_services.usage_reports_v4
validate_reports_snapshot_config
Verify billing-to-COS authorization. Verify the billing service's authorization to the COS bucket for the given account_id. If COS bucket information is not provided, it is retrieved from the configuration file. :param str account_id: Account ID for which the billing report snapshot is configured. :param str interval: (optional) Frequency of taking the snapshot of the billing reports. :param str cos_bucket: (optional) The name of the COS bucket in which to store the snapshot of the billing reports. :param str cos_location: (optional) Region of the COS instance. :param str cos_reports_folder: (optional) The billing reports root folder in which to store the billing report snapshots. Defaults to "IBMCloud-Billing-Reports". :param List[str] report_types: (optional) The types of billing reports to take snapshots of. Possible values are [account_summary, enterprise_summary, account_resource_instance_usage]. :param str versioning: (optional) Whether a new version of the report is created or the existing report version is overwritten with every update. Defaults to "new". :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `SnapshotConfigValidateResponse` object
def validate_reports_snapshot_config( self, account_id: str, *, interval: str = None, cos_bucket: str = None, cos_location: str = None, cos_reports_folder: str = None, report_types: List[str] = None, versioning: str = None, **kwargs, ) -> DetailedResponse: """ Verify billing to COS authorization. Verify billing service to COS bucket authorization for the given account_id. If COS bucket information is not provided, COS bucket information is retrieved from the configuration file. :param str account_id: Account ID for which billing report snapshot is configured. :param str interval: (optional) Frequency of taking the snapshot of the billing reports. :param str cos_bucket: (optional) The name of the COS bucket to store the snapshot of the billing reports. :param str cos_location: (optional) Region of the COS instance. :param str cos_reports_folder: (optional) The billing reports root folder to store the billing reports snapshots. Defaults to "IBMCloud-Billing-Reports". :param List[str] report_types: (optional) The type of billing reports to take snapshot of. Possible values are [account_summary, enterprise_summary, account_resource_instance_usage]. :param str versioning: (optional) A new version of report is created or the existing report version is overwritten with every update. Defaults to "new". :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `SnapshotConfigValidateResponse` object """ if account_id is None: raise ValueError('account_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V4', operation_id='validate_reports_snapshot_config', ) headers.update(sdk_headers) data = { 'account_id': account_id, 'interval': interval, 'cos_bucket': cos_bucket, 'cos_location': cos_location, 'cos_reports_folder': cos_reports_folder, 'report_types': report_types, 'versioning': versioning, } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] headers['Accept'] = 'application/json' url = '/v1/billing-reports-snapshot-config/validate' request = self.prepare_request( method='POST', url=url, headers=headers, data=data, ) response = self.send(request, **kwargs) return response
(self, account_id: str, *, interval: Optional[str] = None, cos_bucket: Optional[str] = None, cos_location: Optional[str] = None, cos_reports_folder: Optional[str] = None, report_types: Optional[List[str]] = None, versioning: Optional[str] = None, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
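A companion sketch (placeholders as above) that checks the billing-to-COS authorization before relying on the stored configuration:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_platform_services import UsageReportsV4

usage_reports = UsageReportsV4(authenticator=IAMAuthenticator('<apikey>'))
# Omitting the COS fields makes the service fall back to the configuration file.
response = usage_reports.validate_reports_snapshot_config(account_id='<account_id>')
validation = response.get_result()  # dict representing a SnapshotConfigValidateResponse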
720,042
ibm_platform_services.user_management_v1
UserManagementV1
The User Management V1 service.
class UserManagementV1(BaseService): """The User Management V1 service.""" DEFAULT_SERVICE_URL = 'https://user-management.cloud.ibm.com' DEFAULT_SERVICE_NAME = 'user_management' @classmethod def new_instance( cls, service_name: str = DEFAULT_SERVICE_NAME, ) -> 'UserManagementV1': """ Return a new client for the User Management service using the specified parameters and external configuration. """ authenticator = get_authenticator_from_environment(service_name) service = cls(authenticator) service.configure_service(service_name) return service def __init__( self, authenticator: Authenticator = None, ) -> None: """ Construct a new client for the User Management service. :param Authenticator authenticator: The authenticator specifies the authentication mechanism. Get up to date information from https://github.com/IBM/python-sdk-core/blob/main/README.md about initializing the authenticator of your choice. """ BaseService.__init__(self, service_url=self.DEFAULT_SERVICE_URL, authenticator=authenticator) ######################### # Users ######################### def list_users( self, account_id: str, *, limit: int = None, include_settings: bool = None, search: str = None, start: str = None, user_id: str = None, **kwargs, ) -> DetailedResponse: """ List users. Retrieve users in the account. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have at least the viewer, editor, or administrator role on the User Management service. If unrestricted view is enabled, the user can see all users in the same account without an IAM role. If restricted view is enabled and user has the viewer, editor, or administrator role on the user management service, the API returns all users in the account. If unrestricted view is enabled and the user does not have these roles, the API returns only the current user. Users are returned in a paginated list with a default limit of 100 users. You can iterate through all users by following the `next_url` field. Additional substring search fields are supported to filter the users. :param str account_id: The account ID of the specified user. :param int limit: (optional) The number of results to be returned. :param bool include_settings: (optional) The user settings to be returned. Set to true to view language, allowed IP address, and authentication settings. :param str search: (optional) The desired search results to be returned. To view the list of users with the additional search filter, use the following query options: `firstname`, `lastname`, `email`, `state`, `substate`, `iam_id`, `realm`, and `userId`. HTML URL encoding for the search query and `:` must be used. For example, search=state%3AINVALID returns a list of invalid users. Multiple search queries can be combined to obtain `OR` results using `,` operator (not URL encoded). For example, search=state%3AINVALID,email%3Amail.test.ibm.com. :param str start: (optional) An optional token that indicates the beginning of the page of results to be returned. If omitted, the first page of results is returned. This value is obtained from the 'next_url' field of the operation response. :param str user_id: (optional) Filter users based on their user ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse with `dict` result representing a `UserList` object """ if not account_id: raise ValueError('account_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='list_users', ) headers.update(sdk_headers) params = { 'limit': limit, 'include_settings': include_settings, 'search': search, '_start': start, 'user_id': user_id, } if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] headers['Accept'] = 'application/json' path_param_keys = ['account_id'] path_param_values = self.encode_path_vars(account_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users'.format(**path_param_dict) request = self.prepare_request( method='GET', url=url, headers=headers, params=params, ) response = self.send(request, **kwargs) return response def invite_users( self, account_id: str, *, users: List['InviteUser'] = None, iam_policy: List['InviteUserIamPolicy'] = None, access_groups: List[str] = None, **kwargs, ) -> DetailedResponse: """ Invite users to an account. Invite users to the account. You must use a user token for authorization. Service IDs can't invite users to the account. To use this method, the requesting user must have the editor or administrator role on the User Management service. For more information, see the [Inviting users](https://cloud.ibm.com/docs/account?topic=account-iamuserinv) documentation. You can specify the user account role and the corresponding IAM policy information in the request body. <br/><br/>When you invite a user to an account, the user is initially created in the `PROCESSING` state. After the user is successfully created, all specified permissions are configured, and the activation email is sent, the invited user is transitioned to the `PENDING` state. When the invited user clicks the activation email and creates and confirms their IBM Cloud account, the user is transitioned to `ACTIVE` state. If the user email is already verified, no email is generated. :param str account_id: The account ID of the specified user. :param List[InviteUser] users: (optional) A list of users to be invited. :param List[InviteUserIamPolicy] iam_policy: (optional) A list of IAM policies. :param List[str] access_groups: (optional) A list of access groups. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse with `dict` result representing a `InvitedUserList` object """ if not account_id: raise ValueError('account_id must be provided') if users is not None: users = [convert_model(x) for x in users] if iam_policy is not None: iam_policy = [convert_model(x) for x in iam_policy] headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='invite_users', ) headers.update(sdk_headers) data = { 'users': users, 'iam_policy': iam_policy, 'access_groups': access_groups, } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] headers['Accept'] = 'application/json' path_param_keys = ['account_id'] path_param_values = self.encode_path_vars(account_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users'.format(**path_param_dict) request = self.prepare_request( method='POST', url=url, headers=headers, data=data, ) response = self.send(request, **kwargs) return response def get_user_profile( self, account_id: str, iam_id: str, *, include_activity: str = None, **kwargs, ) -> DetailedResponse: """ Get user profile. Retrieve a user's profile by the user's IAM ID in your account. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have at least the viewer, editor, or administrator role on the User Management service. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str include_activity: (optional) Include activity information of the user, such as the last authentication timestamp. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `UserProfile` object """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='get_user_profile', ) headers.update(sdk_headers) params = { 'include_activity': include_activity, } if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] headers['Accept'] = 'application/json' path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users/{iam_id}'.format(**path_param_dict) request = self.prepare_request( method='GET', url=url, headers=headers, params=params, ) response = self.send(request, **kwargs) return response def update_user_profile( self, account_id: str, iam_id: str, *, firstname: str = None, lastname: str = None, state: str = None, email: str = None, phonenumber: str = None, altphonenumber: str = None, photo: str = None, include_activity: str = None, **kwargs, ) -> DetailedResponse: """ Partially update user profile. Partially update a user's profile by user's IAM ID. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have at least the editor or administrator role on the User Management service. 
A user or service ID with these roles can change a user's state between `ACTIVE`, `VPN_ONLY`, or `DISABLED_CLASSIC_INFRASTRUCTURE`, but they can't change the state to `PROCESSING` or `PENDING` because these are system states. For other request body fields, a user can update their own profile without having User Management service permissions. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str firstname: (optional) The first name of the user. :param str lastname: (optional) The last name of the user. :param str state: (optional) The state of the user. Possible values are `PROCESSING`, `PENDING`, `ACTIVE`, `DISABLED_CLASSIC_INFRASTRUCTURE`, and `VPN_ONLY`. :param str email: (optional) The email address of the user. :param str phonenumber: (optional) The phone number of the user. :param str altphonenumber: (optional) The alternative phone number of the user. :param str photo: (optional) A link to a photo of the user. :param str include_activity: (optional) Include activity information of the user, such as the last authentication timestamp. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='update_user_profile', ) headers.update(sdk_headers) params = { 'include_activity': include_activity, } data = { 'firstname': firstname, 'lastname': lastname, 'state': state, 'email': email, 'phonenumber': phonenumber, 'altphonenumber': altphonenumber, 'photo': photo, } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users/{iam_id}'.format(**path_param_dict) request = self.prepare_request( method='PATCH', url=url, headers=headers, params=params, data=data, ) response = self.send(request, **kwargs) return response def remove_user( self, account_id: str, iam_id: str, *, include_activity: str = None, **kwargs, ) -> DetailedResponse: """ Remove user from account. Remove users from an account by user's IAM ID. You must use a user token for authorization. Service IDs can't remove users from an account. To use this method, the requesting user must have the editor or administrator role on the User Management service. For more information, see the [Removing users](https://cloud.ibm.com/docs/account?topic=account-remove) documentation. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str include_activity: (optional) Include activity information of the user, such as the last authentication timestamp. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='remove_user', ) headers.update(sdk_headers) params = { 'include_activity': include_activity, } if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users/{iam_id}'.format(**path_param_dict) request = self.prepare_request( method='DELETE', url=url, headers=headers, params=params, ) response = self.send(request, **kwargs) return response def accept( self, *, account_id: str = None, **kwargs, ) -> DetailedResponse: """ Accept an invitation. Accept a user invitation to an account. You can use the user's token for authorization. To use this method, the requesting user must provide the account ID for the account that they are accepting an invitation for. If the user already accepted the invitation request, it returns 204 with no response body. :param str account_id: (optional) The account ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='accept', ) headers.update(sdk_headers) data = { 'account_id': account_id, } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] url = '/v2/users/accept' request = self.prepare_request( method='POST', url=url, headers=headers, data=data, ) response = self.send(request, **kwargs) return response def v3_remove_user( self, account_id: str, iam_id: str, **kwargs, ) -> DetailedResponse: """ Remove user from account (Asynchronous). Remove users from an account by using the user's IAM ID. You must use a user token for authorization. Service IDs can't remove users from an account. If removing the user fails it will set the user's state to ERROR_WHILE_DELETING. To use this method, the requesting user must have the editor or administrator role on the User Management service. For more information, see the [Removing users](https://cloud.ibm.com/docs/account?topic=account-remove) documentation. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='v3_remove_user', ) headers.update(sdk_headers) if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v3/accounts/{account_id}/users/{iam_id}'.format(**path_param_dict) request = self.prepare_request( method='DELETE', url=url, headers=headers, ) response = self.send(request, **kwargs) return response ######################### # User Settings ######################### def get_user_settings( self, account_id: str, iam_id: str, **kwargs, ) -> DetailedResponse: """ Get user settings. Retrieve a user's settings by the user's IAM ID. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have the viewer, editor, or administrator role on the User Management service. <br/><br/>The user settings have several fields. The `language` field is the language setting for the user interface display language. The `notification_language` field is the language setting for phone and email notifications. The `allowed_ip_addresses` field specifies a list of IP addresses that the user can log in and perform operations from as described in [Allowing specific IP addresses for a user](https://cloud.ibm.com/docs/account?topic=account-ips). For information about the `self_manage` field, review information about the [user-managed login setting](https://cloud.ibm.com/docs/account?topic=account-types). :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `UserSettings` object """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='get_user_settings', ) headers.update(sdk_headers) if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] headers['Accept'] = 'application/json' path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users/{iam_id}/settings'.format(**path_param_dict) request = self.prepare_request( method='GET', url=url, headers=headers, ) response = self.send(request, **kwargs) return response def update_user_settings( self, account_id: str, iam_id: str, *, language: str = None, notification_language: str = None, allowed_ip_addresses: str = None, self_manage: bool = None, **kwargs, ) -> DetailedResponse: """ Partially update user settings. Update a user's settings by the user's IAM ID. You can use the IAM service token or a user token for authorization. To fully use this method, the user or service ID must have the editor or administrator role on the User Management service. 
Without these roles, a user can update only their own `language` or `notification_language` fields. If `self_manage` is `true`, the user can also update the `allowed_ip_addresses` field. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str language: (optional) The console UI language. By default, this field is empty. :param str notification_language: (optional) The language for email and phone notifications. By default, this field is empty. :param str allowed_ip_addresses: (optional) A comma-separated list of IP addresses. :param bool self_manage: (optional) Whether user managed login is enabled. The default value is `false`. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='update_user_settings', ) headers.update(sdk_headers) data = { 'language': language, 'notification_language': notification_language, 'allowed_ip_addresses': allowed_ip_addresses, 'self_manage': self_manage, } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users/{iam_id}/settings'.format(**path_param_dict) request = self.prepare_request( method='PATCH', url=url, headers=headers, data=data, ) response = self.send(request, **kwargs) return response
(authenticator: ibm_cloud_sdk_core.authenticators.authenticator.Authenticator = None) -> None
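The record above shows two construction paths for the client. A short sketch of the external-configuration path via new_instance (the USER_MANAGEMENT_* property prefix is inferred from DEFAULT_SERVICE_NAME, not stated in the record):

from ibm_platform_services import UserManagementV1

# Authenticator and service URL are read from external configuration
# (environment variables or a credentials file), per new_instance above.
user_management = UserManagementV1.new_instance()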
720,043
ibm_platform_services.user_management_v1
__init__
Construct a new client for the User Management service. :param Authenticator authenticator: The authenticator specifies the authentication mechanism. Get up-to-date information from https://github.com/IBM/python-sdk-core/blob/main/README.md about initializing the authenticator of your choice.
def __init__( self, authenticator: Authenticator = None, ) -> None: """ Construct a new client for the User Management service. :param Authenticator authenticator: The authenticator specifies the authentication mechanism. Get up to date information from https://github.com/IBM/python-sdk-core/blob/main/README.md about initializing the authenticator of your choice. """ BaseService.__init__(self, service_url=self.DEFAULT_SERVICE_URL, authenticator=authenticator)
(self, authenticator: Optional[ibm_cloud_sdk_core.authenticators.authenticator.Authenticator] = None) -> NoneType
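And the explicit-authenticator path, with a placeholder API key:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_platform_services import UserManagementV1

user_management = UserManagementV1(authenticator=IAMAuthenticator('<apikey>'))
# The client targets DEFAULT_SERVICE_URL unless overridden:
user_management.set_service_url('https://user-management.cloud.ibm.com')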
720,048
ibm_platform_services.user_management_v1
accept
Accept an invitation. Accept a user invitation to an account. You can use the user's token for authorization. To use this method, the requesting user must provide the account ID for the account that they are accepting an invitation for. If the user has already accepted the invitation, the request returns 204 with no response body. :param str account_id: (optional) The account ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def accept( self, *, account_id: str = None, **kwargs, ) -> DetailedResponse: """ Accept an invitation. Accept a user invitation to an account. You can use the user's token for authorization. To use this method, the requesting user must provide the account ID for the account that they are accepting an invitation for. If the user already accepted the invitation request, it returns 204 with no response body. :param str account_id: (optional) The account ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='accept', ) headers.update(sdk_headers) data = { 'account_id': account_id, } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] url = '/v2/users/accept' request = self.prepare_request( method='POST', url=url, headers=headers, data=data, ) response = self.send(request, **kwargs) return response
(self, *, account_id: Optional[str] = None, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
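A call sketch with a placeholder account ID; per the docstring, a repeat acceptance yields 204 with no body:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_platform_services import UserManagementV1

user_management = UserManagementV1(authenticator=IAMAuthenticator('<apikey>'))
response = user_management.accept(account_id='<account_id>')
print(response.get_status_code())  # 204 if the invitation was already accepted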
720,056
ibm_platform_services.user_management_v1
get_user_profile
Get user profile. Retrieve a user's profile by the user's IAM ID in your account. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have at least the viewer, editor, or administrator role on the User Management service. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str include_activity: (optional) Include activity information of the user, such as the last authentication timestamp. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `UserProfile` object
def get_user_profile( self, account_id: str, iam_id: str, *, include_activity: str = None, **kwargs, ) -> DetailedResponse: """ Get user profile. Retrieve a user's profile by the user's IAM ID in your account. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have at least the viewer, editor, or administrator role on the User Management service. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str include_activity: (optional) Include activity information of the user, such as the last authentication timestamp. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `UserProfile` object """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='get_user_profile', ) headers.update(sdk_headers) params = { 'include_activity': include_activity, } if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] headers['Accept'] = 'application/json' path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users/{iam_id}'.format(**path_param_dict) request = self.prepare_request( method='GET', url=url, headers=headers, params=params, ) response = self.send(request, **kwargs) return response
(self, account_id: str, iam_id: str, *, include_activity: Optional[str] = None, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
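A call sketch (placeholder IDs); note that include_activity is typed as a string in the signature, so the flag is passed as text:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_platform_services import UserManagementV1

user_management = UserManagementV1(authenticator=IAMAuthenticator('<apikey>'))
profile = user_management.get_user_profile(
    account_id='<account_id>',
    iam_id='<iam_id>',
    include_activity='true',  # string-typed flag, per the signature above
).get_result()  # dict representing a UserProfile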
720,057
ibm_platform_services.user_management_v1
get_user_settings
Get user settings. Retrieve a user's settings by the user's IAM ID. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have the viewer, editor, or administrator role on the User Management service. The user settings have several fields. The `language` field sets the user interface display language. The `notification_language` field is the language setting for phone and email notifications. The `allowed_ip_addresses` field specifies a list of IP addresses from which the user can log in and perform operations, as described in [Allowing specific IP addresses for a user](https://cloud.ibm.com/docs/account?topic=account-ips). For information about the `self_manage` field, review information about the [user-managed login setting](https://cloud.ibm.com/docs/account?topic=account-types). :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `UserSettings` object
def get_user_settings( self, account_id: str, iam_id: str, **kwargs, ) -> DetailedResponse: """ Get user settings. Retrieve a user's settings by the user's IAM ID. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have the viewer, editor, or administrator role on the User Management service. <br/><br/>The user settings have several fields. The `language` field is the language setting for the user interface display language. The `notification_language` field is the language setting for phone and email notifications. The `allowed_ip_addresses` field specifies a list of IP addresses that the user can log in and perform operations from as described in [Allowing specific IP addresses for a user](https://cloud.ibm.com/docs/account?topic=account-ips). For information about the `self_manage` field, review information about the [user-managed login setting](https://cloud.ibm.com/docs/account?topic=account-types). :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `UserSettings` object """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='get_user_settings', ) headers.update(sdk_headers) if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] headers['Accept'] = 'application/json' path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users/{iam_id}/settings'.format(**path_param_dict) request = self.prepare_request( method='GET', url=url, headers=headers, ) response = self.send(request, **kwargs) return response
(self, account_id: str, iam_id: str, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
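A call sketch (placeholder IDs); the result keys shown are the fields described in the docstring:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_platform_services import UserManagementV1

user_management = UserManagementV1(authenticator=IAMAuthenticator('<apikey>'))
settings = user_management.get_user_settings(
    account_id='<account_id>', iam_id='<iam_id>'
).get_result()  # dict representing a UserSettings
print(settings.get('language'), settings.get('allowed_ip_addresses'))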
720,058
ibm_platform_services.user_management_v1
invite_users
Invite users to an account. Invite users to the account. You must use a user token for authorization. Service IDs can't invite users to the account. To use this method, the requesting user must have the editor or administrator role on the User Management service. For more information, see the [Inviting users](https://cloud.ibm.com/docs/account?topic=account-iamuserinv) documentation. You can specify the user account role and the corresponding IAM policy information in the request body. When you invite a user to an account, the user is initially created in the `PROCESSING` state. After the user is successfully created, all specified permissions are configured, and the activation email is sent, the invited user is transitioned to the `PENDING` state. When the invited user clicks the link in the activation email and creates and confirms their IBM Cloud account, the user is transitioned to the `ACTIVE` state. If the user's email address is already verified, no email is generated. :param str account_id: The account ID of the specified user. :param List[InviteUser] users: (optional) A list of users to be invited. :param List[InviteUserIamPolicy] iam_policy: (optional) A list of IAM policies. :param List[str] access_groups: (optional) A list of access groups. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing an `InvitedUserList` object
def invite_users( self, account_id: str, *, users: List['InviteUser'] = None, iam_policy: List['InviteUserIamPolicy'] = None, access_groups: List[str] = None, **kwargs, ) -> DetailedResponse: """ Invite users to an account. Invite users to the account. You must use a user token for authorization. Service IDs can't invite users to the account. To use this method, the requesting user must have the editor or administrator role on the User Management service. For more information, see the [Inviting users](https://cloud.ibm.com/docs/account?topic=account-iamuserinv) documentation. You can specify the user account role and the corresponding IAM policy information in the request body. <br/><br/>When you invite a user to an account, the user is initially created in the `PROCESSING` state. After the user is successfully created, all specified permissions are configured, and the activation email is sent, the invited user is transitioned to the `PENDING` state. When the invited user clicks the activation email and creates and confirms their IBM Cloud account, the user is transitioned to `ACTIVE` state. If the user email is already verified, no email is generated. :param str account_id: The account ID of the specified user. :param List[InviteUser] users: (optional) A list of users to be invited. :param List[InviteUserIamPolicy] iam_policy: (optional) A list of IAM policies. :param List[str] access_groups: (optional) A list of access groups. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `InvitedUserList` object """ if not account_id: raise ValueError('account_id must be provided') if users is not None: users = [convert_model(x) for x in users] if iam_policy is not None: iam_policy = [convert_model(x) for x in iam_policy] headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='invite_users', ) headers.update(sdk_headers) data = { 'users': users, 'iam_policy': iam_policy, 'access_groups': access_groups, } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] headers['Accept'] = 'application/json' path_param_keys = ['account_id'] path_param_values = self.encode_path_vars(account_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users'.format(**path_param_dict) request = self.prepare_request( method='POST', url=url, headers=headers, data=data, ) response = self.send(request, **kwargs) return response
(self, account_id: str, *, users: Optional[List[ibm_platform_services.user_management_v1.InviteUser]] = None, iam_policy: Optional[List[ibm_platform_services.user_management_v1.InviteUserIamPolicy]] = None, access_groups: Optional[List[str]] = None, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
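A sketch of inviting a single user (placeholders throughout); the InviteUser model is referenced by the signature above, but its email and account_role field names are assumptions based on the service documentation:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_platform_services import UserManagementV1
from ibm_platform_services.user_management_v1 import InviteUser

user_management = UserManagementV1(authenticator=IAMAuthenticator('<apikey>'))
invite = InviteUser(email='user@example.com', account_role='Member')  # assumed field names
response = user_management.invite_users(account_id='<account_id>', users=[invite])
invited = response.get_result()  # dict representing an InvitedUserList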
720,059
ibm_platform_services.user_management_v1
list_users
List users. Retrieve users in the account. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have at least the viewer, editor, or administrator role on the User Management service. If unrestricted view is enabled, the user can see all users in the same account without an IAM role. If restricted view is enabled and the user has the viewer, editor, or administrator role on the User Management service, the API returns all users in the account. If restricted view is enabled and the user does not have these roles, the API returns only the current user. Users are returned in a paginated list with a default limit of 100 users. You can iterate through all users by following the `next_url` field. Additional substring search fields are supported to filter the users. :param str account_id: The account ID of the specified user. :param int limit: (optional) The number of results to be returned. :param bool include_settings: (optional) The user settings to be returned. Set to true to view language, allowed IP address, and authentication settings. :param str search: (optional) The desired search results to be returned. To view the list of users with the additional search filter, use the following query options: `firstname`, `lastname`, `email`, `state`, `substate`, `iam_id`, `realm`, and `userId`. The search query, including the `:` separator, must be URL-encoded. For example, search=state%3AINVALID returns a list of invalid users. Multiple search queries can be combined to obtain `OR` results using the `,` operator (not URL encoded). For example, search=state%3AINVALID,email%3Amail.test.ibm.com. :param str start: (optional) An optional token that indicates the beginning of the page of results to be returned. If omitted, the first page of results is returned. This value is obtained from the 'next_url' field of the operation response. :param str user_id: (optional) Filter users based on their user ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `UserList` object
def list_users( self, account_id: str, *, limit: int = None, include_settings: bool = None, search: str = None, start: str = None, user_id: str = None, **kwargs, ) -> DetailedResponse: """ List users. Retrieve users in the account. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have at least the viewer, editor, or administrator role on the User Management service. If unrestricted view is enabled, the user can see all users in the same account without an IAM role. If restricted view is enabled and user has the viewer, editor, or administrator role on the user management service, the API returns all users in the account. If unrestricted view is enabled and the user does not have these roles, the API returns only the current user. Users are returned in a paginated list with a default limit of 100 users. You can iterate through all users by following the `next_url` field. Additional substring search fields are supported to filter the users. :param str account_id: The account ID of the specified user. :param int limit: (optional) The number of results to be returned. :param bool include_settings: (optional) The user settings to be returned. Set to true to view language, allowed IP address, and authentication settings. :param str search: (optional) The desired search results to be returned. To view the list of users with the additional search filter, use the following query options: `firstname`, `lastname`, `email`, `state`, `substate`, `iam_id`, `realm`, and `userId`. HTML URL encoding for the search query and `:` must be used. For example, search=state%3AINVALID returns a list of invalid users. Multiple search queries can be combined to obtain `OR` results using `,` operator (not URL encoded). For example, search=state%3AINVALID,email%3Amail.test.ibm.com. :param str start: (optional) An optional token that indicates the beginning of the page of results to be returned. If omitted, the first page of results is returned. This value is obtained from the 'next_url' field of the operation response. :param str user_id: (optional) Filter users based on their user ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `UserList` object """ if not account_id: raise ValueError('account_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='list_users', ) headers.update(sdk_headers) params = { 'limit': limit, 'include_settings': include_settings, 'search': search, '_start': start, 'user_id': user_id, } if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] headers['Accept'] = 'application/json' path_param_keys = ['account_id'] path_param_values = self.encode_path_vars(account_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users'.format(**path_param_dict) request = self.prepare_request( method='GET', url=url, headers=headers, params=params, ) response = self.send(request, **kwargs) return response
(self, account_id: str, *, limit: Optional[int] = None, include_settings: Optional[bool] = None, search: Optional[str] = None, start: Optional[str] = None, user_id: Optional[str] = None, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
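A pagination sketch (placeholder account ID). The code above maps the start argument to the _start query parameter, so the token can be recovered from each page's next_url; the 'resources' key is assumed from the UserList schema:

from urllib.parse import parse_qs, urlparse

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_platform_services import UserManagementV1

user_management = UserManagementV1(authenticator=IAMAuthenticator('<apikey>'))
users, start = [], None
while True:
    page = user_management.list_users(
        account_id='<account_id>', limit=100, start=start
    ).get_result()
    users.extend(page.get('resources', []))  # 'resources' key assumed
    next_url = page.get('next_url')
    if not next_url:
        break
    # Recover the '_start' token that the next page expects.
    start = parse_qs(urlparse(next_url).query).get('_start', [None])[0]
    if start is None:  # defensive: stop if the token cannot be recovered
        break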
720,061
ibm_platform_services.user_management_v1
remove_user
Remove user from account. Remove a user from an account by the user's IAM ID. You must use a user token for authorization. Service IDs can't remove users from an account. To use this method, the requesting user must have the editor or administrator role on the User Management service. For more information, see the [Removing users](https://cloud.ibm.com/docs/account?topic=account-remove) documentation. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str include_activity: (optional) Include activity information of the user, such as the last authentication timestamp. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def remove_user( self, account_id: str, iam_id: str, *, include_activity: str = None, **kwargs, ) -> DetailedResponse: """ Remove user from account. Remove users from an account by user's IAM ID. You must use a user token for authorization. Service IDs can't remove users from an account. To use this method, the requesting user must have the editor or administrator role on the User Management service. For more information, see the [Removing users](https://cloud.ibm.com/docs/account?topic=account-remove) documentation. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str include_activity: (optional) Include activity information of the user, such as the last authentication timestamp. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='remove_user', ) headers.update(sdk_headers) params = { 'include_activity': include_activity, } if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users/{iam_id}'.format(**path_param_dict) request = self.prepare_request( method='DELETE', url=url, headers=headers, params=params, ) response = self.send(request, **kwargs) return response
(self, account_id: str, iam_id: str, *, include_activity: Optional[str] = None, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
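A call sketch (placeholder IDs):

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_platform_services import UserManagementV1

user_management = UserManagementV1(authenticator=IAMAuthenticator('<apikey>'))
response = user_management.remove_user(account_id='<account_id>', iam_id='<iam_id>')
print(response.get_status_code())  # success carries no response body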
720,069
ibm_platform_services.user_management_v1
update_user_profile
Partially update user profile. Partially update a user's profile by the user's IAM ID. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have at least the editor or administrator role on the User Management service. A user or service ID with these roles can change a user's state among `ACTIVE`, `VPN_ONLY`, and `DISABLED_CLASSIC_INFRASTRUCTURE`, but they can't change the state to `PROCESSING` or `PENDING` because these are system states. For other request body fields, a user can update their own profile without having User Management service permissions. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str firstname: (optional) The first name of the user. :param str lastname: (optional) The last name of the user. :param str state: (optional) The state of the user. Possible values are `PROCESSING`, `PENDING`, `ACTIVE`, `DISABLED_CLASSIC_INFRASTRUCTURE`, and `VPN_ONLY`. :param str email: (optional) The email address of the user. :param str phonenumber: (optional) The phone number of the user. :param str altphonenumber: (optional) The alternative phone number of the user. :param str photo: (optional) A link to a photo of the user. :param str include_activity: (optional) Include activity information of the user, such as the last authentication timestamp. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def update_user_profile( self, account_id: str, iam_id: str, *, firstname: str = None, lastname: str = None, state: str = None, email: str = None, phonenumber: str = None, altphonenumber: str = None, photo: str = None, include_activity: str = None, **kwargs, ) -> DetailedResponse: """ Partially update user profile. Partially update a user's profile by user's IAM ID. You can use the IAM service token or a user token for authorization. To use this method, the requesting user or service ID must have at least the editor or administrator role on the User Management service. A user or service ID with these roles can change a user's state between `ACTIVE`, `VPN_ONLY`, or `DISABLED_CLASSIC_INFRASTRUCTURE`, but they can't change the state to `PROCESSING` or `PENDING` because these are system states. For other request body fields, a user can update their own profile without having User Management service permissions. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str firstname: (optional) The first name of the user. :param str lastname: (optional) The last name of the user. :param str state: (optional) The state of the user. Possible values are `PROCESSING`, `PENDING`, `ACTIVE`, `DISABLED_CLASSIC_INFRASTRUCTURE`, and `VPN_ONLY`. :param str email: (optional) The email address of the user. :param str phonenumber: (optional) The phone number of the user. :param str altphonenumber: (optional) The alternative phone number of the user. :param str photo: (optional) A link to a photo of the user. :param str include_activity: (optional) Include activity information of the user, such as the last authentication timestamp. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='update_user_profile', ) headers.update(sdk_headers) params = { 'include_activity': include_activity, } data = { 'firstname': firstname, 'lastname': lastname, 'state': state, 'email': email, 'phonenumber': phonenumber, 'altphonenumber': altphonenumber, 'photo': photo, } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users/{iam_id}'.format(**path_param_dict) request = self.prepare_request( method='PATCH', url=url, headers=headers, params=params, data=data, ) response = self.send(request, **kwargs) return response
(self, account_id: str, iam_id: str, *, firstname: Optional[str] = None, lastname: Optional[str] = None, state: Optional[str] = None, email: Optional[str] = None, phonenumber: Optional[str] = None, altphonenumber: Optional[str] = None, photo: Optional[str] = None, include_activity: Optional[str] = None, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
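A partial-update sketch (placeholder IDs and illustrative field values); only the fields passed are sent, since the code above drops None values from the request body:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_platform_services import UserManagementV1

user_management = UserManagementV1(authenticator=IAMAuthenticator('<apikey>'))
response = user_management.update_user_profile(
    account_id='<account_id>',
    iam_id='<iam_id>',
    firstname='Ada',    # illustrative value
    state='VPN_ONLY',   # one of the user-settable states listed above
)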
720,070
ibm_platform_services.user_management_v1
update_user_settings
Partially update user settings. Update a user's settings by the user's IAM ID. You can use the IAM service token or a user token for authorization. To fully use this method, the user or service ID must have the editor or administrator role on the User Management service. Without these roles, a user can update only their own `language` or `notification_language` fields. If `self_manage` is `true`, the user can also update the `allowed_ip_addresses` field. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str language: (optional) The console UI language. By default, this field is empty. :param str notification_language: (optional) The language for email and phone notifications. By default, this field is empty. :param str allowed_ip_addresses: (optional) A comma-separated list of IP addresses. :param bool self_manage: (optional) Whether user-managed login is enabled. The default value is `false`. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def update_user_settings( self, account_id: str, iam_id: str, *, language: str = None, notification_language: str = None, allowed_ip_addresses: str = None, self_manage: bool = None, **kwargs, ) -> DetailedResponse: """ Partially update user settings. Update a user's settings by the user's IAM ID. You can use the IAM service token or a user token for authorization. To fully use this method, the user or service ID must have the editor or administrator role on the User Management service. Without these roles, a user can update only their own `language` or `notification_language` fields. If `self_manage` is `true`, the user can also update the `allowed_ip_addresses` field. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param str language: (optional) The console UI language. By default, this field is empty. :param str notification_language: (optional) The language for email and phone notifications. By default, this field is empty. :param str allowed_ip_addresses: (optional) A comma-separated list of IP addresses. :param bool self_manage: (optional) Whether user managed login is enabled. The default value is `false`. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='update_user_settings', ) headers.update(sdk_headers) data = { 'language': language, 'notification_language': notification_language, 'allowed_ip_addresses': allowed_ip_addresses, 'self_manage': self_manage, } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v2/accounts/{account_id}/users/{iam_id}/settings'.format(**path_param_dict) request = self.prepare_request( method='PATCH', url=url, headers=headers, data=data, ) response = self.send(request, **kwargs) return response
(self, account_id: str, iam_id: str, *, language: Optional[str] = None, notification_language: Optional[str] = None, allowed_ip_addresses: Optional[str] = None, self_manage: Optional[bool] = None, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
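Likewise, a hedged sketch of updating a user's settings; the IDs are placeholders.

from ibm_platform_services import UserManagementV1

service = UserManagementV1.new_instance()  # credentials from external config

# Without the editor/administrator role, a user may change only their own
# `language` and `notification_language` fields.
response = service.update_user_settings(
    account_id='example-account-id',  # hypothetical
    iam_id='IBMid-EXAMPLE',           # hypothetical
    language='en-us',
    notification_language='en-us',
)
print(response.get_status_code())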
720,071
ibm_platform_services.user_management_v1
v3_remove_user
Remove user from account (Asynchronous). Remove a user from an account by using the user's IAM ID. You must use a user token for authorization; service IDs can't remove users from an account. If removing the user fails, the user's state is set to ERROR_WHILE_DELETING. To use this method, the requesting user must have the editor or administrator role on the User Management service. For more information, see the [Removing users](https://cloud.ibm.com/docs/account?topic=account-remove) documentation. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def v3_remove_user( self, account_id: str, iam_id: str, **kwargs, ) -> DetailedResponse: """ Remove user from account (Asynchronous). Remove a user from an account by using the user's IAM ID. You must use a user token for authorization; service IDs can't remove users from an account. If removing the user fails, the user's state is set to ERROR_WHILE_DELETING. To use this method, the requesting user must have the editor or administrator role on the User Management service. For more information, see the [Removing users](https://cloud.ibm.com/docs/account?topic=account-remove) documentation. :param str account_id: The account ID of the specified user. :param str iam_id: The user's IAM ID. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if not account_id: raise ValueError('account_id must be provided') if not iam_id: raise ValueError('iam_id must be provided') headers = {} sdk_headers = get_sdk_headers( service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='v3_remove_user', ) headers.update(sdk_headers) if 'headers' in kwargs: headers.update(kwargs.get('headers')) del kwargs['headers'] path_param_keys = ['account_id', 'iam_id'] path_param_values = self.encode_path_vars(account_id, iam_id) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/v3/accounts/{account_id}/users/{iam_id}'.format(**path_param_dict) request = self.prepare_request( method='DELETE', url=url, headers=headers, ) response = self.send(request, **kwargs) return response
(self, account_id: str, iam_id: str, **kwargs) -> ibm_cloud_sdk_core.detailed_response.DetailedResponse
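A hedged usage sketch; the IDs are placeholders, and the call must be made with a user token rather than a service ID.

from ibm_platform_services import UserManagementV1

service = UserManagementV1.new_instance()

response = service.v3_remove_user(
    account_id='example-account-id',  # hypothetical
    iam_id='IBMid-EXAMPLE',           # hypothetical
)
# The removal is asynchronous; a success status only means it was accepted.
print(response.get_status_code())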
720,078
ibm_platform_services.common
get_sdk_headers
Get the request headers to be sent in requests by the SDK
def get_sdk_headers(service_name, service_version, operation_id): # pylint: disable=unused-argument """ Get the request headers to be sent in requests by the SDK """ headers = {} headers[HEADER_NAME_USER_AGENT] = get_user_agent() return headers
(service_name, service_version, operation_id)
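A small sketch of what this helper returns. The argument values are illustrative (the current implementation ignores them), and the exact User-Agent string depends on the installed SDK version.

from ibm_platform_services.common import get_sdk_headers

headers = get_sdk_headers(
    service_name='user_management',      # currently unused by the helper
    service_version='V1',                # currently unused
    operation_id='update_user_profile',  # currently unused
)
# Only the User-Agent header is populated, e.g.:
# {'User-Agent': 'ibm-platform-services/... (lang=python; ...)'}
print(headers)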
720,095
textwrap3
TextWrapper
Object for wrapping/filling text. The public interface consists of the wrap() and fill() methods; the other methods are just there for subclasses to override in order to tweak the default behaviour. If you want to completely replace the main wrapping algorithm, you'll probably have to override _wrap_chunks(). Several instance attributes control various aspects of wrapping: width (default: 70) the maximum width of wrapped lines (unless break_long_words is false) initial_indent (default: "") string that will be prepended to the first line of wrapped output. Counts towards the line's width. subsequent_indent (default: "") string that will be prepended to all lines save the first of wrapped output; also counts towards each line's width. expand_tabs (default: true) Expand tabs in input text to spaces before further processing. Each tab will become 0 .. 'tabsize' spaces, depending on its position in its line. If false, each tab is treated as a single character. tabsize (default: 8) Expand tabs in input text to 0 .. 'tabsize' spaces, unless 'expand_tabs' is false. replace_whitespace (default: true) Replace all whitespace characters in the input text by spaces after tab expansion. Note that if expand_tabs is false and replace_whitespace is true, every tab will be converted to a single space! fix_sentence_endings (default: false) Ensure that sentence-ending punctuation is always followed by two spaces. Off by default because the algorithm is (unavoidably) imperfect. break_long_words (default: true) Break words longer than 'width'. If false, those words will not be broken, and some lines might be longer than 'width'. break_on_hyphens (default: true) Allow breaking hyphenated words. If true, wrapping will occur preferably on whitespaces and right after hyphens part of compound words. drop_whitespace (default: true) Drop leading and trailing whitespace from lines. max_lines (default: None) Truncate wrapped lines. placeholder (default: ' [...]') Append to the last line of truncated text.
class TextWrapper: """ Object for wrapping/filling text. The public interface consists of the wrap() and fill() methods; the other methods are just there for subclasses to override in order to tweak the default behaviour. If you want to completely replace the main wrapping algorithm, you'll probably have to override _wrap_chunks(). Several instance attributes control various aspects of wrapping: width (default: 70) the maximum width of wrapped lines (unless break_long_words is false) initial_indent (default: "") string that will be prepended to the first line of wrapped output. Counts towards the line's width. subsequent_indent (default: "") string that will be prepended to all lines save the first of wrapped output; also counts towards each line's width. expand_tabs (default: true) Expand tabs in input text to spaces before further processing. Each tab will become 0 .. 'tabsize' spaces, depending on its position in its line. If false, each tab is treated as a single character. tabsize (default: 8) Expand tabs in input text to 0 .. 'tabsize' spaces, unless 'expand_tabs' is false. replace_whitespace (default: true) Replace all whitespace characters in the input text by spaces after tab expansion. Note that if expand_tabs is false and replace_whitespace is true, every tab will be converted to a single space! fix_sentence_endings (default: false) Ensure that sentence-ending punctuation is always followed by two spaces. Off by default because the algorithm is (unavoidably) imperfect. break_long_words (default: true) Break words longer than 'width'. If false, those words will not be broken, and some lines might be longer than 'width'. break_on_hyphens (default: true) Allow breaking hyphenated words. If true, wrapping will occur preferably on whitespaces and right after hyphens part of compound words. drop_whitespace (default: true) Drop leading and trailing whitespace from lines. max_lines (default: None) Truncate wrapped lines. placeholder (default: ' [...]') Append to the last line of truncated text. """ unicode_whitespace_trans = {} uspace = ord(' ') for x in _whitespace: unicode_whitespace_trans[ord(x)] = uspace # This funky little regex is just the trick for splitting # text up into word-wrappable chunks. E.g. # "Hello there -- you goof-ball, use the -b option!" # splits into # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! # (after stripping out empty strings). word_punct = r'[\w!"\'&.,?]' letter = r'[^\d\W]' whitespace = r'[%s]' % re.escape(_whitespace) nowhitespace = '[^' + whitespace[1:] emdash = u'\u2014' wordsep_re = re.compile(r''' ( # any whitespace %(ws)s+ | # em-dash between words (?<=%(wp)s) -{2,} (?=\w) | # Unicode em-dash between words (?<=%(wp)s) %(emdash)s (?=\w) | # word, possibly hyphenated %(nws)s+? (?: # hyphenated word -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-)) (?= %(lt)s -? %(lt)s) | # end of word (?=%(ws)s|\Z) | # em-dash (?<=%(wp)s) (?=-{2,}\w) | # Unicode em-dash (?<=%(wp)s) (?=%(emdash)s\w) ) )''' % {'wp': word_punct, 'lt': letter, 'ws': whitespace, 'nws': nowhitespace, 'emdash': emdash}, re.VERBOSE | re.UNICODE) del word_punct, letter, nowhitespace, emdash # NB re.UNICODE flag added for consistent behavior across Python 2 and 3 # Not really needed in Python 3, but without it, Python 2 foolishly does # not consider letters with diacritical marks (e.g. 
the very common # '\N{LATIN SMALL LETTER E WITH ACUTE}') to be "word" characters (`\w`) # NB Unicode em dash cannot be easily combined with -- case, because the # -{2,} admits the (slightly degenerate) --- etc. cases, which are # unique to simulate em dashes # If \N{EM DASH} finds favor, then direct handling of \N{HYPHEN} # and \N{NON-BREAKING HYPHEN} make sense as follow-ons # This less funky little regex just split on recognized spaces. E.g. # "Hello there -- you goof-ball, use the -b option!" # splits into # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/ wordsep_simple_re = re.compile(r'(%s+)' % whitespace) del whitespace # XXX this is not locale- or charset-aware -- string.lowercase # is US-ASCII only (and therefore English-only) sentence_end_re = re.compile(r'[a-z]' # lowercase letter r'[\.\!\?]' # sentence-ending punct. r'[\"\']?' # optional end-of-quote r'\Z') # end of chunk def __init__(self, width=70, initial_indent="", subsequent_indent="", expand_tabs=True, replace_whitespace=True, fix_sentence_endings=False, break_long_words=True, drop_whitespace=True, break_on_hyphens=True, tabsize=8, max_lines=None, placeholder=' [...]'): self.width = width self.initial_indent = initial_indent self.subsequent_indent = subsequent_indent self.expand_tabs = expand_tabs self.replace_whitespace = replace_whitespace self.fix_sentence_endings = fix_sentence_endings self.break_long_words = break_long_words self.drop_whitespace = drop_whitespace self.break_on_hyphens = break_on_hyphens self.tabsize = tabsize self.max_lines = max_lines self.placeholder = placeholder # -- Private methods ----------------------------------------------- # (possibly useful for subclasses to override) def _munge_whitespace(self, text): """_munge_whitespace(text : string) -> string Munge whitespace in text: expand tabs and convert all other whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz" becomes " foo bar baz". """ if self.expand_tabs: text = text.expandtabs(self.tabsize) if self.replace_whitespace: # text = text.translate(self.unicode_whitespace_trans) # shim for Python 2 str text = _translate(text, self.unicode_whitespace_trans) return text def _split(self, text): """_split(text : string) -> [string] Split the text to wrap into indivisible chunks. Chunks are not quite the same as words; see _wrap_chunks() for full details. As an example, the text Look, goof-ball -- use the -b option! breaks into the following chunks: 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', 'option!' if break_on_hyphens is True, or in: 'Look,', ' ', 'goof-ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', option!' otherwise. """ if self.break_on_hyphens is True: chunks = self.wordsep_re.split(text) else: chunks = self.wordsep_simple_re.split(text) chunks = [c for c in chunks if c] return chunks def _fix_sentence_endings(self, chunks): """_fix_sentence_endings(chunks : [string]) Correct for sentence endings buried in 'chunks'. Eg. when the original text contains "... foo.\\nBar ...", munge_whitespace() and split() will convert that to [..., "foo.", " ", "Bar", ...] which has one too few spaces; this method simply changes the one space to two. 
""" i = 0 patsearch = self.sentence_end_re.search while i < len(chunks)-1: if chunks[i+1] == " " and patsearch(chunks[i]): chunks[i+1] = " " i += 2 else: i += 1 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): """_handle_long_word(chunks : [string], cur_line : [string], cur_len : int, width : int) Handle a chunk of text (most likely a word, not whitespace) that is too long to fit in any line. """ # Figure out when indent is larger than the specified width, and make # sure at least one character is stripped off on every pass if width < 1: space_left = 1 else: space_left = width - cur_len # If we're allowed to break long words, then do so: put as much # of the next chunk onto the current line as will fit. if self.break_long_words: cur_line.append(reversed_chunks[-1][:space_left]) reversed_chunks[-1] = reversed_chunks[-1][space_left:] # Otherwise, we have to preserve the long word intact. Only add # it to the current line if there's nothing already there -- # that minimizes how much we violate the width constraint. elif not cur_line: cur_line.append(reversed_chunks.pop()) # If we're not allowed to break long words, and there's already # text on the current line, do nothing. Next time through the # main loop of _wrap_chunks(), we'll wind up here again, but # cur_len will be zero, so the next line will be entirely # devoted to the long word that we can't handle right now. def _wrap_chunks(self, chunks): """_wrap_chunks(chunks : [string]) -> [string] Wrap a sequence of text chunks and return a list of lines of length 'self.width' or less. (If 'break_long_words' is false, some lines may be longer than this.) Chunks correspond roughly to words and the whitespace between them: each chunk is indivisible (modulo 'break_long_words'), but a line break can come between any two chunks. Chunks should not have internal whitespace; ie. a chunk is either all whitespace or a "word". Whitespace chunks will be removed from the beginning and end of lines, but apart from that whitespace is preserved. """ lines = [] if self.width <= 0: raise ValueError("invalid width %r (must be > 0)" % self.width) if self.max_lines is not None: if self.max_lines > 1: indent = self.subsequent_indent else: indent = self.initial_indent if len(indent) + len(self.placeholder.lstrip()) > self.width: raise ValueError("placeholder too large for max width") # Arrange in reverse order so items can be efficiently popped # from a stack of chucks. chunks.reverse() while chunks: # Start the list of chunks that will make up the current line. # cur_len is just the length of all the chunks in cur_line. cur_line = [] cur_len = 0 # Figure out which static string will prefix this line. if lines: indent = self.subsequent_indent else: indent = self.initial_indent # Maximum width for this line. width = self.width - len(indent) # First chunk on line is whitespace -- drop it, unless this # is the very beginning of the text (ie. no lines started yet). if self.drop_whitespace and chunks[-1].strip() == '' and lines: del chunks[-1] while chunks: l = len(chunks[-1]) # Can at least squeeze this chunk onto the current line. if cur_len + l <= width: cur_line.append(chunks.pop()) cur_len += l # Nope, this line is full. else: break # The current line is full, and the next chunk is too big to # fit on *any* line (not just this one). if chunks and len(chunks[-1]) > width: self._handle_long_word(chunks, cur_line, cur_len, width) cur_len = sum(map(len, cur_line)) # If the last chunk on this line is all whitespace, drop it. 
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': cur_len -= len(cur_line[-1]) del cur_line[-1] if cur_line: if (self.max_lines is None or len(lines) + 1 < self.max_lines or (not chunks or self.drop_whitespace and len(chunks) == 1 and not chunks[0].strip()) and cur_len <= width): # Convert current line back to a string and store it in # list of all lines (return value). lines.append(indent + ''.join(cur_line)) else: while cur_line: if (cur_line[-1].strip() and cur_len + len(self.placeholder) <= width): cur_line.append(self.placeholder) lines.append(indent + ''.join(cur_line)) break cur_len -= len(cur_line[-1]) del cur_line[-1] else: if lines: prev_line = lines[-1].rstrip() if (len(prev_line) + len(self.placeholder) <= self.width): lines[-1] = prev_line + self.placeholder break lines.append(indent + self.placeholder.lstrip()) break return lines def _split_chunks(self, text): text = self._munge_whitespace(text) return self._split(text) # -- Public interface ---------------------------------------------- def wrap(self, text): """wrap(text : string) -> [string] Reformat the single paragraph in 'text' so it fits in lines of no more than 'self.width' columns, and return a list of wrapped lines. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. """ chunks = self._split_chunks(text) if self.fix_sentence_endings: self._fix_sentence_endings(chunks) return self._wrap_chunks(chunks) def fill(self, text): """fill(text : string) -> string Reformat the single paragraph in 'text' to fit in lines of no more than 'self.width' columns, and return a new string containing the entire wrapped paragraph. """ return "\n".join(self.wrap(text))
(width=70, initial_indent='', subsequent_indent='', expand_tabs=True, replace_whitespace=True, fix_sentence_endings=False, break_long_words=True, drop_whitespace=True, break_on_hyphens=True, tabsize=8, max_lines=None, placeholder=' [...]')
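A short, self-contained demonstration of the public wrap()/fill() interface described above.

from textwrap3 import TextWrapper

wrapper = TextWrapper(width=24, subsequent_indent='    ')
text = "Hello there -- you goof-ball, use the -b option!"

for line in wrapper.wrap(text):
    print(line)

# fill() is just '\n'.join(wrapper.wrap(text))
print(wrapper.fill(text))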
720,096
textwrap3
__init__
null
def __init__(self, width=70, initial_indent="", subsequent_indent="", expand_tabs=True, replace_whitespace=True, fix_sentence_endings=False, break_long_words=True, drop_whitespace=True, break_on_hyphens=True, tabsize=8, max_lines=None, placeholder=' [...]'): self.width = width self.initial_indent = initial_indent self.subsequent_indent = subsequent_indent self.expand_tabs = expand_tabs self.replace_whitespace = replace_whitespace self.fix_sentence_endings = fix_sentence_endings self.break_long_words = break_long_words self.drop_whitespace = drop_whitespace self.break_on_hyphens = break_on_hyphens self.tabsize = tabsize self.max_lines = max_lines self.placeholder = placeholder
(self, width=70, initial_indent='', subsequent_indent='', expand_tabs=True, replace_whitespace=True, fix_sentence_endings=False, break_long_words=True, drop_whitespace=True, break_on_hyphens=True, tabsize=8, max_lines=None, placeholder=' [...]')
720,098
textwrap3
_handle_long_word
_handle_long_word(chunks : [string], cur_line : [string], cur_len : int, width : int) Handle a chunk of text (most likely a word, not whitespace) that is too long to fit in any line.
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): """_handle_long_word(chunks : [string], cur_line : [string], cur_len : int, width : int) Handle a chunk of text (most likely a word, not whitespace) that is too long to fit in any line. """ # Figure out when indent is larger than the specified width, and make # sure at least one character is stripped off on every pass if width < 1: space_left = 1 else: space_left = width - cur_len # If we're allowed to break long words, then do so: put as much # of the next chunk onto the current line as will fit. if self.break_long_words: cur_line.append(reversed_chunks[-1][:space_left]) reversed_chunks[-1] = reversed_chunks[-1][space_left:] # Otherwise, we have to preserve the long word intact. Only add # it to the current line if there's nothing already there -- # that minimizes how much we violate the width constraint. elif not cur_line: cur_line.append(reversed_chunks.pop()) # If we're not allowed to break long words, and there's already # text on the current line, do nothing. Next time through the # main loop of _wrap_chunks(), we'll wind up here again, but # cur_len will be zero, so the next line will be entirely # devoted to the long word that we can't handle right now.
(self, reversed_chunks, cur_line, cur_len, width)
720,099
textwrap3
_munge_whitespace
_munge_whitespace(text : string) -> string Munge whitespace in text: expand tabs and convert all other whitespace characters to spaces. Eg. " foo\tbar\n\nbaz" becomes " foo bar baz".
def _munge_whitespace(self, text): """_munge_whitespace(text : string) -> string Munge whitespace in text: expand tabs and convert all other whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz" becomes " foo bar baz". """ if self.expand_tabs: text = text.expandtabs(self.tabsize) if self.replace_whitespace: # text = text.translate(self.unicode_whitespace_trans) # shim for Python 2 str text = _translate(text, self.unicode_whitespace_trans) return text
(self, text)
720,105
textwrap3
_translate
Shim for Python 2 str translate, which uses an archaic form of str.translate that expects a string- or buffer-based table. But in Python 2, unicode.translate is compatible with the Python 3 model of a dict-based mapping table, so wash through the unicode type, then back-map to str type. Could alternatively just shift up to unicode, which is what most strings should be anyway, but that path is fraught with danger of causing downstream errors.
def _translate(s, mapping): """ Shim for Python 2 str translate, which uses an archaic form of str.translate that expects a string- or buffer-based table. But in Python 2, unicode.translate is compatible with the Python 3 model of a dict-based mapping table, so wash through the unicode type, then back-map to str type. Could alternatively just shift up to unicode, which is what most strings should be anyway, but that path is fraught with danger of causing downstream errors. """ if _PY2 and isinstance(s, str): return str(unicode(s).translate(mapping)) else: return s.translate(mapping) # default path
(s, mapping)
720,106
textwrap3
dedent
Remove any common leading whitespace from every line in `text`. This can be used to make triple-quoted strings line up with the left edge of the display, while still presenting them in the source code in indented form. Note that tabs and spaces are both treated as whitespace, but they are not equal: the lines " hello" and "\thello" are considered to have no common leading whitespace. (This behaviour is new in Python 2.5; older versions of this module incorrectly expanded tabs before searching for common leading whitespace.)
def dedent(text): """Remove any common leading whitespace from every line in `text`. This can be used to make triple-quoted strings line up with the left edge of the display, while still presenting them in the source code in indented form. Note that tabs and spaces are both treated as whitespace, but they are not equal: the lines " hello" and "\\thello" are considered to have no common leading whitespace. (This behaviour is new in Python 2.5; older versions of this module incorrectly expanded tabs before searching for common leading whitespace.) """ # Look for the longest leading string of spaces and tabs common to # all lines. debug = False margin = None text = _whitespace_only_re.sub('', text) indents = _leading_whitespace_re.findall(text) for indent in indents: if margin is None: margin = indent # Current line more deeply indented than previous winner: # no change (previous winner is still on top). elif indent.startswith(margin): pass # Current line consistent with and no deeper than previous winner: # it's the new winner. elif margin.startswith(indent): margin = indent # Find the largest common whitespace between current line and previous # winner. else: for i, (x, y) in enumerate(zip(margin, indent)): if x != y: margin = margin[:i] break # sanity check (testing/debugging only) if 0 and margin: # pragma: no cover for line in text.split("\n"): assert not line or line.startswith(margin), \ "line = %r, margin = %r" % (line, margin) if margin: text = re.sub(r'(?m)^' + margin, '', text) return text
(text)
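A quick demonstration of the margin removal described above.

from textwrap3 import dedent

snippet = """\
    def greet():
        print("hi")
"""
# The common 4-space margin is removed; the inner statement keeps
# its remaining relative indentation.
print(dedent(snippet))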
720,110
textwrap3
shorten
Collapse and truncate the given text to fit in the given width. The text first has its whitespace collapsed. If it then fits in the *width*, it is returned as is. Otherwise, as many words as possible are joined and then the placeholder is appended:: >>> textwrap.shorten("Hello world!", width=12) 'Hello world!' >>> textwrap.shorten("Hello world!", width=11) 'Hello [...]'
def shorten(text, width, **kwargs): """Collapse and truncate the given text to fit in the given width. The text first has its whitespace collapsed. If it then fits in the *width*, it is returned as is. Otherwise, as many words as possible are joined and then the placeholder is appended:: >>> textwrap.shorten("Hello world!", width=12) 'Hello world!' >>> textwrap.shorten("Hello world!", width=11) 'Hello [...]' """ w = TextWrapper(width=width, max_lines=1, **kwargs) return w.fill(' '.join(text.strip().split()))
(text, width, **kwargs)
720,114
dagster_spark.types
SparkOpError
null
class SparkOpError(Exception): pass
null
720,117
dagster_spark.utils
construct_spark_shell_command
Constructs the spark-submit command for a Spark job.
def construct_spark_shell_command( application_jar, main_class, master_url=None, spark_conf=None, deploy_mode=None, application_arguments=None, spark_home=None, ): """Constructs the spark-submit command for a Spark job.""" check.opt_str_param(master_url, "master_url") check.str_param(application_jar, "application_jar") spark_conf = check.opt_dict_param(spark_conf, "spark_conf") check.opt_str_param(deploy_mode, "deploy_mode") check.opt_str_param(application_arguments, "application_arguments") check.opt_str_param(spark_home, "spark_home") spark_home = spark_home if spark_home else os.environ.get("SPARK_HOME") if spark_home is None: raise SparkOpError( "No spark home set. You must either pass spark_home in config or " "set $SPARK_HOME in your environment (got None)." ) master_url = ["--master", master_url] if master_url else [] deploy_mode = ["--deploy-mode", deploy_mode] if deploy_mode else [] # Guard against appending None when no application arguments are provided. application_arguments = [application_arguments] if application_arguments else [] spark_shell_cmd = ( [f"{spark_home}/bin/spark-submit", "--class", main_class] + master_url + deploy_mode + parse_spark_config(spark_conf) + [application_jar] + application_arguments ) return spark_shell_cmd
(application_jar, main_class, master_url=None, spark_conf=None, deploy_mode=None, application_arguments=None, spark_home=None)
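A hedged sketch of the command this helper builds; all paths and class names are invented for illustration.

from dagster_spark.utils import construct_spark_shell_command

cmd = construct_spark_shell_command(
    application_jar='/jars/my-job.jar',   # hypothetical jar path
    main_class='com.example.MyJob',       # hypothetical main class
    master_url='spark://10.0.0.1:7077',
    deploy_mode='client',
    spark_home='/opt/spark',              # avoids relying on $SPARK_HOME
)
# Roughly: ['/opt/spark/bin/spark-submit', '--class', 'com.example.MyJob',
#           '--master', 'spark://10.0.0.1:7077', '--deploy-mode', 'client',
#           '/jars/my-job.jar']
print(cmd)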
720,118
dagster_spark.ops
create_spark_op
null
def create_spark_op( name, main_class, description=None, required_resource_keys=frozenset(["spark"]) ): check.str_param(name, "name") check.str_param(main_class, "main_class") check.opt_str_param(description, "description", "A parameterized Spark job.") check.set_param(required_resource_keys, "required_resource_keys") @op( name=name, description=description, config_schema=define_spark_config(), ins={"start": In(Nothing)}, out=Out(Nothing), tags={"kind": "spark", "main_class": main_class}, required_resource_keys=required_resource_keys, ) def spark_op(context): context.resources.spark.run_spark_job(context.op_config, main_class) return spark_op
(name, main_class, description=None, required_resource_keys=frozenset({'spark'}))
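A sketch of wiring the generated op into a job. The op requires a resource under the key "spark" that exposes run_spark_job; the `spark_resource` import path is an assumption, and the main class is a placeholder.

from dagster import job
from dagster_spark import create_spark_op
from dagster_spark.resources import spark_resource  # import path is an assumption

run_my_job = create_spark_op(
    name='run_my_job',
    main_class='com.example.MyJob',  # hypothetical main class
)

@job(resource_defs={'spark': spark_resource})
def spark_pipeline():
    run_my_job()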
720,119
dagster_spark.configs
define_spark_config
Spark configuration. See the Spark documentation for reference: https://spark.apache.org/docs/latest/submitting-applications.html
def define_spark_config(): """Spark configuration. See the Spark documentation for reference: https://spark.apache.org/docs/latest/submitting-applications.html """ master_url = Field( StringSource, description="The master URL for the cluster (e.g. spark://23.195.26.187:7077)", is_required=True, ) deploy_mode = Field( SparkDeployMode, description="""Whether to deploy your driver on the worker nodes (cluster) or locally as an external client (client) (default: client). A common deployment strategy is to submit your application from a gateway machine that is physically co-located with your worker machines (e.g. Master node in a standalone EC2 cluster). In this setup, client mode is appropriate. In client mode, the driver is launched directly within the spark-submit process which acts as a client to the cluster. The input and output of the application is attached to the console. Thus, this mode is especially suitable for applications that involve the REPL (e.g. Spark shell).""", is_required=False, ) application_jar = Field( StringSource, description="""Path to a bundled jar including your application and all dependencies. The URL must be globally visible inside of your cluster, for instance, an hdfs:// path or a file:// path that is present on all nodes. """, is_required=True, ) application_arguments = Field( StringSource, description="Arguments passed to the main method of your main class, if any", is_required=False, ) spark_home = Field( StringSource, description=( "The path to your spark installation. Defaults to $SPARK_HOME at runtime if not" " provided." ), is_required=False, ) return { "master_url": master_url, "deploy_mode": deploy_mode, "application_jar": application_jar, "spark_conf": spark_config(), "spark_home": spark_home, "application_arguments": application_arguments, }
()
720,125
pansi
ANSI
null
class ANSI(Mapping, object): def __init__(self, **codes): self.__codes = dict(codes) def __getitem__(self, key): return self.__codes[key] def __len__(self): return len(self.__codes) # pragma: no cover def __iter__(self): return iter(self.__codes) # pragma: no cover def __dir__(self): return list(self.__codes) # pragma: no cover def __getattr__(self, name): try: return self.__codes[name] except KeyError: raise AttributeError(name)
(**codes)
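A minimal sketch of the mapping: codes are supplied at construction time and can then be read either as attributes or as mapping keys. The import assumes the class is exposed at the package root.

from pansi import ANSI  # assumes ANSI is importable from the package root

colors = ANSI(red='\x1b[31m', reset='\x1b[0m')
print(colors.red + 'alert' + colors.reset)        # attribute access
print(colors['red'] + 'alert' + colors['reset'])  # mapping access
print(len(colors), list(colors))                  # Mapping protocol: 2 ['red', 'reset']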
720,127
pansi
__dir__
null
def __dir__(self): return list(self.__codes) # pragma: no cover
(self)
720,129
pansi
__getattr__
null
def __getattr__(self, name): try: return self.__codes[name] except KeyError: raise AttributeError(name)
(self, name)
720,130
pansi
__getitem__
null
def __getitem__(self, key): return self.__codes[key]
(self, key)
720,131
pansi
__init__
null
def __init__(self, **codes): self.__codes = dict(codes)
(self, **codes)
720,132
pansi
__iter__
null
def __iter__(self): return iter(self.__codes) # pragma: no cover
(self)
720,133
pansi
__len__
null
def __len__(self): return len(self.__codes) # pragma: no cover
(self)
720,148
pansi
RGB
null
class RGB(object): def __init__(self, bg=False): if bg: self.__template = "\x1b[48;2;%s;%s;%sm" else: self.__template = "\x1b[38;2;%s;%s;%sm" def __getitem__(self, code): if len(code) == 4 and code[0] == "#": # rgb[#XXX] r = int(code[1], 16) * 17 g = int(code[2], 16) * 17 b = int(code[3], 16) * 17 elif len(code) == 7 and code[0] == "#": # rgb[#XXXXXX] r = int(code[1:3], 16) g = int(code[3:5], 16) b = int(code[5:7], 16) else: raise ValueError("Unknown hex code %r" % code) return self.__template % (r, g, b)
(bg=False)
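A minimal sketch of the 24-bit color lookup; the import assumes the class is exposed at the package root.

from pansi import RGB  # assumes RGB is importable from the package root

fg = RGB()         # 38;2... foreground escape codes
bg = RGB(bg=True)  # 48;2... background escape codes

print(fg['#ff8800'] + 'orange text' + '\x1b[0m')
# Three-digit codes expand each hex digit (x -> xx), so these agree:
assert fg['#f80'] == fg['#ff8800']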
720,149
pansi
__getitem__
null
def __getitem__(self, code): if len(code) == 4 and code[0] == "#": # rgb[#XXX] r = int(code[1], 16) * 17 g = int(code[2], 16) * 17 b = int(code[3], 16) * 17 elif len(code) == 7 and code[0] == "#": # rgb[#XXXXXX] r = int(code[1:3], 16) g = int(code[3:5], 16) b = int(code[5:7], 16) else: raise ValueError("Unknown hex code %r" % code) return self.__template % (r, g, b)
(self, code)
720,150
pansi
__init__
null
def __init__(self, bg=False): if bg: self.__template = "\x1b[48;2;%s;%s;%sm" else: self.__template = "\x1b[38;2;%s;%s;%sm"
(self, bg=False)
720,151
poetrip.pipfile
PipFile
null
class PipFile: _SOURCE_DEFAULT: Dict[str, Any] = { "url": "https://pypi.org/simple", "verify_ssl": True, "name": "pypi" } def __init__( self, source: dict = None, requires: dict = None, packages: dict = None, dev_packages: dict = None ): self._source: dict = source or self._SOURCE_DEFAULT self._requires: dict = requires or {} self._packages: dict = packages or {} self._dev_packages: dict = dev_packages or {} @property def attributes(self) -> Dict[str, Any]: return { 'source': [self._source], 'requires': self._requires, 'packages': self._packages, 'dev-packages': self._dev_packages } def to_file(self, filename: str = 'Pipfile') -> None: """Create a Pipfile on disk.""" with open(filename, 'w') as file: toml.dump(self.attributes, file)
(source: dict = None, requires: dict = None, packages: dict = None, dev_packages: dict = None)
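A short usage sketch: build a Pipfile in memory and write it to disk. The dependency pins are illustrative.

from poetrip.pipfile import PipFile

pipfile = PipFile(
    requires={'python-version': '3.9'},
    packages={'requests': '*'},
    dev_packages={'pytest': '*'},
)
# Serializes `attributes` (with the default pypi source) via toml.dump.
pipfile.to_file('Pipfile')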
720,152
poetrip.pipfile
__init__
null
def __init__( self, source: dict = None, requires: dict = None, packages: dict = None, dev_packages: dict = None ): self._source: dict = source or self._SOURCE_DEFAULT self._requires: dict = requires or {} self._packages: dict = packages or {} self._dev_packages: dict = dev_packages or {}
(self, source: Optional[dict] = None, requires: Optional[dict] = None, packages: Optional[dict] = None, dev_packages: Optional[dict] = None)
720,153
poetrip.pipfile
to_file
Create a Pipfile on disk.
def to_file(self, filename: str = 'Pipfile') -> None: """Create a Pipfile on disk.""" with open(filename, 'w') as file: toml.dump(self.attributes, file)
(self, filename: str = 'Pipfile') -> NoneType
720,154
poetrip.pyproject
PyProject
null
class PyProject: def __init__( self, infos: dict = None, dependencies: dict = None, dev_dependencies: dict = None ): self._infos = infos or {} self._dependencies = dependencies or {} self._dev_dependencies = dev_dependencies or {} @classmethod def from_file(cls, filename: str = 'pyproject.toml') -> 'PyProject': """Load a PyProject file from a filename.""" toml_content = dict(toml.load(filename)) poetry_attributes: dict = toml_content['tool']['poetry'] return cls( dependencies=poetry_attributes.pop('dependencies'), dev_dependencies=poetry_attributes.pop('dev-dependencies'), infos=poetry_attributes ) @property def attributes(self) -> Dict[str, Any]: return {'tool': {'poetry': { **self._infos, 'dependencies': self._dependencies, 'dev-dependencies': self._dev_dependencies }}} def _get_requires(self) -> dict: python_version: str = self._dependencies.pop('python') return {'python-version': python_version} def to_pipfile(self) -> 'PipFile': return PipFile( requires=self._get_requires(), packages=self._dependencies, dev_packages=self._dev_dependencies )
(infos: dict = None, dependencies: dict = None, dev_dependencies: dict = None)
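The typical conversion flow, sketched under the assumption that a Poetry-style pyproject.toml with [tool.poetry.dependencies] and [tool.poetry.dev-dependencies] tables exists in the working directory.

from poetrip.pyproject import PyProject

project = PyProject.from_file('pyproject.toml')
project.to_pipfile().to_file('Pipfile')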
720,155
poetrip.pyproject
__init__
null
def __init__( self, infos: dict = None, dependencies: dict = None, dev_dependencies: dict = None ): self._infos = infos or {} self._dependencies = dependencies or {} self._dev_dependencies = dev_dependencies or {}
(self, infos: Optional[dict] = None, dependencies: Optional[dict] = None, dev_dependencies: Optional[dict] = None)
720,156
poetrip.pyproject
_get_requires
null
def _get_requires(self) -> dict: python_version: str = self._dependencies.pop('python') return {'python-version': python_version}
(self) -> dict
720,157
poetrip.pyproject
to_pipfile
null
def to_pipfile(self) -> 'PipFile': return PipFile( requires=self._get_requires(), packages=self._dependencies, dev_packages=self._dev_dependencies )
(self) -> poetrip.pipfile.PipFile
720,161
sphinx_rtd_theme
config_initiated
null
def config_initiated(app, config): theme_options = config.html_theme_options or {} if theme_options.get('canonical_url'): logger.warning( _('The canonical_url option is deprecated, use the html_baseurl option from Sphinx instead.') )
(app, config)
720,162
sphinx_rtd_theme
extend_html_context
null
def extend_html_context(app, pagename, templatename, context, doctree): # Add ``sphinx_version_info`` tuple for use in Jinja templates context['sphinx_version_info'] = sphinx_version
(app, pagename, templatename, context, doctree)
720,164
sphinx_rtd_theme
get_html_theme_path
Return the HTML theme path.
def get_html_theme_path(): """Return the HTML theme path.""" cur_dir = path.abspath(path.dirname(path.dirname(__file__))) return cur_dir
()
720,166
sphinx_rtd_theme
setup
null
def setup(app): if python_version[0] < 3: logger.error("Python 2 is not supported with sphinx_rtd_theme, update to Python 3.") app.require_sphinx('5.0') if app.config.html4_writer: logger.error("'html4_writer' is not supported with sphinx_rtd_theme.") # Since Sphinx 6, jquery isn't bundled anymore and we need to ensure that # the sphinxcontrib-jquery extension is enabled. # See: https://dev.readthedocs.io/en/latest/design/sphinx-jquery.html if sphinx_version >= (6, 0, 0): # Documentation of Sphinx guarantees that an extension is added and # enabled at most once. # See: https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.setup_extension app.setup_extension("sphinxcontrib.jquery") # However, we need to call the extension's callback since setup_extension doesn't do it # See: https://github.com/sphinx-contrib/jquery/issues/23 from sphinxcontrib.jquery import add_js_files as jquery_add_js_files jquery_add_js_files(app, app.config) # Register the theme that can be referenced without adding a theme path app.add_html_theme('sphinx_rtd_theme', path.abspath(path.dirname(__file__))) # Add Sphinx message catalog for newer versions of Sphinx # See http://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_message_catalog rtd_locale_path = path.join(path.abspath(path.dirname(__file__)), 'locale') app.add_message_catalog('sphinx', rtd_locale_path) app.connect('config-inited', config_initiated) # sphinx emits the permalink icon for headers, so choose one more in keeping with our theme app.config.html_permalinks_icon = "\uf0c1" # Extend the default context when rendering the templates. app.connect("html-page-context", extend_html_context) return {'parallel_read_safe': True, 'parallel_write_safe': True}
(app)
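For context, this setup() hook runs when the theme is activated as an extension in a project's conf.py, typically like this:

# conf.py
extensions = [
    'sphinx_rtd_theme',
]
html_theme = 'sphinx_rtd_theme'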
720,171
sqlparse
format
Format *sql* according to *options*. Available options are documented in :ref:`formatting`. In addition to the formatting options this function accepts the keyword "encoding" which determines the encoding of the statement. :returns: The formatted SQL statement as string.
def format(sql, encoding=None, **options): """Format *sql* according to *options*. Available options are documented in :ref:`formatting`. In addition to the formatting options this function accepts the keyword "encoding" which determines the encoding of the statement. :returns: The formatted SQL statement as string. """ stack = engine.FilterStack() options = formatter.validate_options(options) stack = formatter.build_filter_stack(stack, options) stack.postprocess.append(filters.SerializerUnicode()) return ''.join(stack.run(sql, encoding))
(sql, encoding=None, **options)
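A quick demonstration with two common formatting options; the exact output spacing may vary slightly by sqlparse version.

import sqlparse

sql = 'select id, name from users where id = 1'
print(sqlparse.format(sql, reindent=True, keyword_case='upper'))
# Produces roughly:
# SELECT id,
#        name
# FROM users
# WHERE id = 1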
720,175
sqlparse
parse
Parse sql and return a list of statements. :param sql: A string containing one or more SQL statements. :param encoding: The encoding of the statement (optional). :returns: A tuple of :class:`~sqlparse.sql.Statement` instances.
def parse(sql, encoding=None): """Parse sql and return a list of statements. :param sql: A string containing one or more SQL statements. :param encoding: The encoding of the statement (optional). :returns: A tuple of :class:`~sqlparse.sql.Statement` instances. """ return tuple(parsestream(sql, encoding))
(sql, encoding=None)
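A minimal example of parsing multiple statements and inspecting the result.

import sqlparse

statements = sqlparse.parse('SELECT 1; SELECT 2;')
print(len(statements))           # 2
print(statements[0].get_type())  # 'SELECT'
print(statements[0].tokens)      # the statement's token tree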
720,176
sqlparse
parsestream
Parses SQL statements from a file-like object. :param stream: A file-like object. :param encoding: The encoding of the stream contents (optional). :returns: A generator of :class:`~sqlparse.sql.Statement` instances.
def parsestream(stream, encoding=None): """Parses SQL statements from a file-like object. :param stream: A file-like object. :param encoding: The encoding of the stream contents (optional). :returns: A generator of :class:`~sqlparse.sql.Statement` instances. """ stack = engine.FilterStack() stack.enable_grouping() return stack.run(stream, encoding)
(stream, encoding=None)
720,177
sqlparse
split
Split *sql* into single statements. :param sql: A string containing one or more SQL statements. :param encoding: The encoding of the statement (optional). :param strip_semicolon: If True, remove trailing semicolons (default: False). :returns: A list of strings.
def split(sql, encoding=None, strip_semicolon=False): """Split *sql* into single statements. :param sql: A string containing one or more SQL statements. :param encoding: The encoding of the statement (optional). :param strip_semicolon: If True, remove trailing semicolons (default: False). :returns: A list of strings. """ stack = engine.FilterStack(strip_semicolon=strip_semicolon) return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
(sql, encoding=None, strip_semicolon=False)
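A minimal example showing the effect of strip_semicolon.

import sqlparse

raw = 'SELECT 1; SELECT 2;'
print(sqlparse.split(raw))
# ['SELECT 1;', 'SELECT 2;']
print(sqlparse.split(raw, strip_semicolon=True))
# ['SELECT 1', 'SELECT 2']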
720,181
mldesigner._get_io_context
IOContext
Component IO context: includes the component's outputs and supports operations on them (e.g. marking an early available output as ready). You can use `get_io_context` to obtain this at runtime.
class IOContext: """Component IO context: includes the component's outputs and supports operations on them (e.g. marking an early available output as ready). You can use `get_io_context` to obtain this at runtime. """ def __init__(self): self._outputs = OutputContext() @property def outputs(self) -> OutputContext: return self._outputs
()
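A hedged sketch of the runtime flow the docstring describes. The import path of `get_io_context`, the output name `my_flag`, and the `value` attribute used to assign the output's content are all assumptions, not confirmed API.

# Inside a running mldesigner component:
from mldesigner import get_io_context  # import path is an assumption

ctx = get_io_context()
out = ctx.outputs.my_flag  # 'my_flag': hypothetical early available output
out.value = 'done'         # 'value' setter is an assumption
out.ready()                # mark the early available output as ready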
720,182
mldesigner._get_io_context
__init__
null
def __init__(self): self._outputs = OutputContext()
(self)
720,183
mldesigner._input_output
Input
Define an input of a component. Defaults to a uri_folder Input. :param type: The type of the data input. Possible values include: 'uri_folder', 'uri_file', 'mltable', 'mlflow_model', 'custom_model', 'integer', 'number', 'string', 'boolean' :type type: str :param path: The path to which the input is pointing. Could be local data, cloud data, a registered name, etc. :type path: str :param mode: The mode of the data input. Possible values are: 'ro_mount': Read-only mount the data, 'download': Download the data to the compute target, 'direct': Pass in the URI as a string :type mode: str :param min: The min value -- if a smaller value is passed to a job, the job execution will fail :type min: Union[integer, float] :param max: The max value -- if a larger value is passed to a job, the job execution will fail :type max: Union[integer, float] :param optional: Determine if this input is optional :type optional: bool :param description: Description of the input :type description: str
class Input(_IOBase): """Define an input of a component. Default to be a uri_folder Input. :param type: The type of the data input. Possible values include: 'uri_folder', 'uri_file', 'mltable', 'mlflow_model', 'custom_model', 'integer', 'number', 'string', 'boolean' :type type: str :param path: The path to which the input is pointing. Could be local data, cloud data, a registered name, etc. :type path: str :param mode: The mode of the data input. Possible values are: 'ro_mount': Read-only mount the data, 'download': Download the data to the compute target, 'direct': Pass in the URI as a string :type mode: str :param min: The min value -- if a smaller value is passed to a job, the job execution will fail :type min: Union[integer, float] :param max: The max value -- if a larger value is passed to a job, the job execution will fail :type max: Union[integer, float] :param optional: Determine if this input is optional :type optional: bool :param description: Description of the input :type description: str """ _EMPTY = Parameter.empty _IO_KEYS = ["path", "type", "mode", "description", "min", "max", "enum", "optional", "default"] @overload def __init__( self, *, type: Literal[ "uri_folder", "uri_file", "mltable", "mlflow_model", "custom_model", "integer", "number", "string", "boolean", ] = "uri_folder", path: str = None, mode: str = None, optional: bool = None, description: str = None, **kwargs, ): """Initialize an input of a component. :param path: The path to which the input is pointing. Could be local data, cloud data, a registered name, etc. :type path: str :param type: The type of the data input. Possible values include: 'uri_folder', 'uri_file', 'mltable', 'mlflow_model', 'custom_model', and user-defined types. :type type: str :param mode: The mode of the data input. Possible values are: 'ro_mount': Read-only mount the data, 'download': Download the data to the compute target, 'direct': Pass in the URI as a string :type mode: str :param optional: Determine if this input is optional :type optional: bool :param description: Description of the input :type description: str """ @overload def __init__( self, *, type: Literal["number"] = "number", min: float = None, max: float = None, optional: bool = None, description: str = None, **kwargs, ): """Initialize a number input :param type: The type of the data input. Can only be set to "number". :type type: str :param min: The min value -- if a smaller value is passed to a job, the job execution will fail :type min: float :param max: The max value -- if a larger value is passed to a job, the job execution will fail :type max: float :param optional: Determine if this input is optional :type optional: bool :param description: Description of the input :type description: str """ @overload def __init__( self, *, type: Literal["integer"] = "integer", min: int = None, max: int = None, optional: bool = None, description: str = None, **kwargs, ): """Initialize an integer input :param type: The type of the data input. Can only be set to "integer". 
:type type: str :param min: The min value -- if a smaller value is passed to a job, the job execution will fail :type min: integer :param max: The max value -- if a larger value is passed to a job, the job execution will fail :type max: integer :param optional: Determine if this input is optional :type optional: bool :param description: Description of the input :type description: str """ @overload def __init__( self, *, type: Literal["string"] = "string", optional: bool = None, description: str = None, **kwargs, ): """Initialize a string input. :param type: The type of the data input. Can only be set to "string". :type type: str :param optional: Determine if this input is optional :type optional: bool :param description: Description of the input :type description: str """ @overload def __init__( self, *, type: Literal["boolean"] = "boolean", optional: bool = None, description: str = None, **kwargs, ): """Initialize a bool input. :param type: The type of the data input. Can only be set to "boolean". :type type: str :param optional: Determine if this input is optional :type optional: bool :param description: Description of the input :type description: str """ def __init__( self, *, type: str = "uri_folder", path: str = None, mode: str = None, min: Union[int, float] = None, max: Union[int, float] = None, enum=None, optional: bool = None, description: str = None, **kwargs, ): # As an annotation, it is not allowed to initialize the _port_name. # The _port_name will be updated by the annotated variable name. self._is_primitive_type = type in IoConstants.PRIMITIVE_STR_2_TYPE self.path = path self.mode = None if self._is_primitive_type else mode self.min = min self.max = max self.enum = enum self.optional = optional self.default = kwargs.pop("default", None) super().__init__(port_name=None, type=type, description=description, **kwargs) # normalize properties like ["default", "min", "max", "optional"] self._normalize_self_properties() def _to_io_entity_args_dict(self): """Convert the Input object to a kwargs dict for azure.ai.ml.entity.Input.""" keys = self._IO_KEYS result = {key: getattr(self, key, None) for key in keys} result = {**self._kwargs, **result} return _remove_empty_values(result) @classmethod def _get_input_by_type(cls, t: type, optional=None): if t in IoConstants.PRIMITIVE_TYPE_2_STR: return cls(type=IoConstants.PRIMITIVE_TYPE_2_STR[t], optional=optional) return None @classmethod def _get_default_unknown_input(cls, optional=None): # Set type as None here to avoid schema validation failed return cls(type=None, optional=optional) def _normalize_self_properties(self): # parse value from string to it's original type. eg: "false" -> False if self.type in IoConstants.PARAM_PARSERS: for key in ["default", "min", "max"]: if getattr(self, key) is not None: origin_value = getattr(self, key) new_value = IoConstants.PARAM_PARSERS[self.type](origin_value) setattr(self, key, new_value) self.optional = IoConstants.PARAM_PARSERS["boolean"](getattr(self, "optional", "false")) self.optional = True if self.optional is True else None
(*, type: str = 'uri_folder', path: str = None, mode: str = None, min: Union[int, float] = None, max: Union[int, float] = None, enum=None, optional: bool = None, description: str = None, **kwargs)
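A hedged sketch of the annotation style these classes support in mldesigner components; the decorator arguments and port names are illustrative.

from mldesigner import command_component, Input, Output

@command_component(name='train')  # decorator arguments are illustrative
def train(
    data: Input(type='uri_folder'),
    epochs: Input(type='integer', min=1, max=100),
    model_out: Output(type='uri_folder'),
):
    # The component body would read from `data` and write to `model_out`.
    ...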
720,184
mldesigner._input_output
__init__
null
def __init__( self, *, type: str = "uri_folder", path: str = None, mode: str = None, min: Union[int, float] = None, max: Union[int, float] = None, enum=None, optional: bool = None, description: str = None, **kwargs, ): # As an annotation, it is not allowed to initialize the _port_name. # The _port_name will be updated by the annotated variable name. self._is_primitive_type = type in IoConstants.PRIMITIVE_STR_2_TYPE self.path = path self.mode = None if self._is_primitive_type else mode self.min = min self.max = max self.enum = enum self.optional = optional self.default = kwargs.pop("default", None) super().__init__(port_name=None, type=type, description=description, **kwargs) # normalize properties like ["default", "min", "max", "optional"] self._normalize_self_properties()
(self, *, type: str = 'uri_folder', path: Optional[str] = None, mode: Optional[str] = None, min: Union[int, float, NoneType] = None, max: Union[int, float, NoneType] = None, enum=None, optional: Optional[bool] = None, description: Optional[str] = None, **kwargs)
720,185
mldesigner._input_output
_normalize_self_properties
null
def _normalize_self_properties(self): # parse value from string to its original type, e.g. "false" -> False if self.type in IoConstants.PARAM_PARSERS: for key in ["default", "min", "max"]: if getattr(self, key) is not None: origin_value = getattr(self, key) new_value = IoConstants.PARAM_PARSERS[self.type](origin_value) setattr(self, key, new_value) self.optional = IoConstants.PARAM_PARSERS["boolean"](getattr(self, "optional", "false")) self.optional = True if self.optional is True else None
(self)
720,186
mldesigner._input_output
_to_io_entity_args_dict
Convert the Input object to a kwargs dict for azure.ai.ml.entity.Input.
def _to_io_entity_args_dict(self): """Convert the Input object to a kwargs dict for azure.ai.ml.entity.Input.""" keys = self._IO_KEYS result = {key: getattr(self, key, None) for key in keys} result = {**self._kwargs, **result} return _remove_empty_values(result)
(self)
720,187
mldesigner._input_output
Meta
This is the metadata of Inputs/Outputs.
class Meta(object): """This is the metadata of Inputs/Outputs.""" def __init__( self, type=None, description=None, min=None, max=None, **kwargs, ): self.type = type self.description = description self._min = min self._max = max self._default = kwargs.pop("default", None) self._kwargs = kwargs def _to_io_entity_args_dict(self): """Convert the object to a kwargs dict for azure.ai.ml.entity.Output.""" keys = set(Output._IO_KEYS + Input._IO_KEYS) result = {key: getattr(self, key, None) for key in keys} result.update(self._kwargs) if IoConstants.PRIMITIVE_TYPE_2_STR.get(self.type) is not None: result["type"] = IoConstants.PRIMITIVE_TYPE_2_STR.get(self.type) return _remove_empty_values(result) @property def max(self) -> Optional[Union[int, float]]: """Return the maximum value of the parameter for a numeric parameter.""" return self._max @property def min(self) -> Optional[Union[int, float]]: """Return the minimum value of the parameter for a numeric parameter.""" return self._min @property def default(self): """Return the default value of the parameter.""" return self._default
(type=None, description=None, min=None, max=None, **kwargs)
720,188
mldesigner._input_output
__init__
null
def __init__( self, type=None, description=None, min=None, max=None, **kwargs, ): self.type = type self.description = description self._min = min self._max = max self._default = kwargs.pop("default", None) self._kwargs = kwargs
(self, type=None, description=None, min=None, max=None, **kwargs)
720,189
mldesigner._input_output
_to_io_entity_args_dict
Convert the object to a kwargs dict for azure.ai.ml.entity.Output.
def _to_io_entity_args_dict(self): """Convert the object to a kwargs dict for azure.ai.ml.entity.Output.""" keys = set(Output._IO_KEYS + Input._IO_KEYS) result = {key: getattr(self, key, None) for key in keys} result.update(self._kwargs) if IoConstants.PRIMITIVE_TYPE_2_STR.get(self.type) is not None: result["type"] = IoConstants.PRIMITIVE_TYPE_2_STR.get(self.type) return _remove_empty_values(result)
(self)
720,190
mldesigner._input_output
Output
Define an output of a component. :param type: The type of the data output. Possible values include: 'uri_folder', 'uri_file', 'mltable', 'mlflow_model', 'custom_model', and user-defined types. :type type: str :param path: The path to which the output is pointing. Needs to point to a cloud path. :type path: str :param mode: The mode of the data output. Possible values are: 'rw_mount': Read-write mount the data, 'upload': Upload the data from the compute target, 'direct': Pass in the URI as a string :type mode: str :param description: Description of the output :type description: str
class Output(_IOBase): """Define an output of a component. :param type: The type of the data output. Possible values include: 'uri_folder', 'uri_file', 'mltable', 'mlflow_model', 'custom_model', and user-defined types. :type type: str :param path: The path to which the output is pointing. Needs to point to a cloud path. :type path: str :param mode: The mode of the data output. Possible values are: 'rw_mount': Read-write mount the data, 'upload': Upload the data from the compute target, 'direct': Pass in the URI as a string :type mode: str :param description: Description of the output :type description: str """ _IO_KEYS = ["path", "type", "mode", "description", "early_available"] @overload def __init__( self, *, type: Literal[ "uri_folder", "uri_file", "mltable", "mlflow_model", "custom_model", "integer", "number", "string", "boolean", ] = "uri_folder", path=None, mode=None, description=None, early_available=None, ): """Define an output of a component. :param path: The path to which the output is pointing. Needs to point to a cloud path. :type path: str :param type: The type of the data output. Possible values include: 'uri_folder', 'uri_file', 'mltable', 'mlflow_model', 'custom_model', and user-defined types. :type type: str :param mode: The mode of the data output. Possible values are: 'rw_mount': Read-write mount the data, 'upload': Upload the data from the compute target, 'direct': Pass in the URI as a string :type mode: str :param description: Description of the output :type description: str :param early_available: Determine whether the Output is early available or not. :type early_available: bool """ def __init__( self, *, type: str = "uri_folder", path=None, mode=None, description=None, early_available=None, **kwargs, ): # As an annotation, it is not allowed to initialize the _port_name. # The _port_name will be updated by the annotated variable name. self.path = path self.mode = mode self.early_available = early_available super().__init__(port_name=None, type=type, description=description, **kwargs) self._is_primitive_type = self.type in IoConstants.PRIMITIVE_STR_2_TYPE # early available output value and ready flag self._value = None self._ready = None def _to_io_entity_args_dict(self): """Convert the Output object to a kwargs dict for azure.ai.ml.entity.Output.""" keys = self._IO_KEYS result = {key: getattr(self, key) for key in keys} result.update(self._kwargs) return _remove_empty_values(result) def ready(self) -> None: """Mark early available output ready.""" execute_logger = _LoggerFactory.get_logger("execute", target_stdout=True) # validate if self._ready is True: execute_logger.warning( "Output '%s' has already been marked as ready, ignore current operation.", self._port_name ) return if self._value is None: error_message = f"Early available output {self._port_name!r} is not ready yet, please assign value for it." raise UserErrorException(error_message) # validate AzureML limits (https://aka.ms/azure-machine-learning-limits), # length of property key (100 characters) and length of property value (1000 characters). # note that we have prefix (azureml.pipeline.control.) in key, so there are 75 characters left. if len(self._port_name) > 75: error_message = ( f"Early available output {self._port_name!r} port name is too long, the limit is 75 characters." ) raise UserErrorException(error_message) if isinstance(self._value, str) and len(self._value) > 1000: error_message = ( f"Early available output {self._port_name!r} content is too long, the limit is 1000 characters. " f"Got {len(self._value)} characters, please control the size." ) raise UserErrorException(error_message) # write content to uri_file from mldesigner._component_executor import _parsed_args # if component is executed without mldesigner, cannot know where is the target file if _parsed_args is not None: execute_logger.info("Write early available output content '%s' to file", self._value) Path(_parsed_args[self._port_name]).write_text(str(self._value)) # write content to RH early_available_control_output_key = f"azureml.pipeline.control.{self._port_name}" _write_properties_to_run_history( properties={early_available_control_output_key: self._value}, operation_name=RunHistoryOperations.MARK_OUTPUT_READY, ) self._ready = True
(*, type: str = 'uri_folder', path=None, mode=None, description=None, early_available=None, **kwargs)
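As a quick illustration of the `Output` annotation above, here is a minimal sketch of declaring a file output on a component; the component name and body are hypothetical, and it assumes the usual mldesigner executor behavior of resolving the output to a writable path at runtime:

.. code-block:: python

    from mldesigner import command_component, Input, Output

    @command_component()
    def score(
        data: Input(type="uri_folder"),
        report: Output(type="uri_file", mode="rw_mount"),
    ):
        # hypothetical body: at runtime `report` is resolved to a writable path
        with open(report, "w") as f:
            f.write("scored")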
720,191
mldesigner._input_output
__init__
null
def __init__( self, *, type: str = "uri_folder", path=None, mode=None, description=None, early_available=None, **kwargs, ): # As an annotation, it is not allowed to initialize the _port_name. # The _port_name will be updated by the annotated variable name. self.path = path self.mode = mode self.early_available = early_available super().__init__(port_name=None, type=type, description=description, **kwargs) self._is_primitive_type = self.type in IoConstants.PRIMITIVE_STR_2_TYPE # early available output value and ready flag self._value = None self._ready = None
(self, *, type: str = 'uri_folder', path=None, mode=None, description=None, early_available=None, **kwargs)
720,192
mldesigner._input_output
_to_io_entity_args_dict
Convert the Output object to a kwargs dict for azure.ai.ml.entity.Output.
def _to_io_entity_args_dict(self): """Convert the Output object to a kwargs dict for azure.ai.ml.entity.Output.""" keys = self._IO_KEYS result = {key: getattr(self, key) for key in keys} result.update(self._kwargs) return _remove_empty_values(result)
(self)
720,193
mldesigner._input_output
ready
Mark early available output ready.
def ready(self) -> None: """Mark early available output ready.""" execute_logger = _LoggerFactory.get_logger("execute", target_stdout=True) # validate if self._ready is True: execute_logger.warning( "Output '%s' has already been marked as ready, ignore current operation.", self._port_name ) return if self._value is None: error_message = f"Early available output {self._port_name!r} is not ready yet, please assign value for it." raise UserErrorException(error_message) # validate AzureML limits (https://aka.ms/azure-machine-learning-limits), # length of property key (100 characters) and length of property value (1000 characters). # note that we have prefix (azureml.pipeline.control.) in key, so there are 75 characters left. if len(self._port_name) > 75: error_message = ( f"Early available output {self._port_name!r} port name is too long, the limit is 75 characters." ) raise UserErrorException(error_message) if isinstance(self._value, str) and len(self._value) > 1000: error_message = ( f"Early available output {self._port_name!r} content is too long, the limit is 1000 characters. " f"Got {len(self._value)} characters, please control the size." ) raise UserErrorException(error_message) # write content to uri_file from mldesigner._component_executor import _parsed_args # if component is executed without mldesigner, cannot know where is the target file if _parsed_args is not None: execute_logger.info("Write early available output content '%s' to file", self._value) Path(_parsed_args[self._port_name]).write_text(str(self._value)) # write content to RH early_available_control_output_key = f"azureml.pipeline.control.{self._port_name}" _write_properties_to_run_history( properties={early_available_control_output_key: self._value}, operation_name=RunHistoryOperations.MARK_OUTPUT_READY, ) self._ready = True
(self) -> NoneType
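To make the limits enforced by `ready` concrete: the run-history key is prefixed with `azureml.pipeline.control.`, which leaves 75 of the 100 allowed key characters for the port name, and a string value must not exceed 1000 characters. A minimal sketch using `get_io_context` (documented later in this listing; the output name is hypothetical):

.. code-block:: python

    from mldesigner import get_io_context

    ctx = get_io_context()
    ctx.outputs.control_flag = "done"  # assign a value first ...
    ctx.outputs.control_flag.ready()   # ... then mark ready; raises UserErrorException if unassigned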
720,194
mldesigner._get_io_context
OutputContext
Component outputs context, output can be accessed with `.<name>`.
class OutputContext: """Component outputs context, output can be accessed with `.<name>`.""" def __init__(self): self._outputs: typing.Dict[str, Output] = dict() def __setattr__(self, name: str, value: str): if name == "_outputs": super(OutputContext, self).__setattr__(name, value) else: # note: we cannot know the Output type now, so specify `string` here; # and we cannot validate value type, either. self._outputs[name] = Output(type="string", early_available=True) # update name and value for later writing run history self._outputs[name]._port_name = name self._outputs[name]._value = value # pylint: disable=protected-access self._outputs[name]._ready = False # pylint: disable=protected-access def __getattr__(self, name: str) -> Output: if name == "_outputs": return super(OutputContext, self).__getattribute__(name) if name not in self._outputs.keys(): error_message = f"Output {name!r} not found, please check the spelling of the name." raise UserErrorException(error_message) return self._outputs[name]
()
720,195
mldesigner._get_io_context
__getattr__
null
def __getattr__(self, name: str) -> Output: if name == "_outputs": return super(OutputContext, self).__getattribute__(name) if name not in self._outputs.keys(): error_message = f"Output {name!r} not found, please check the spelling of the name." raise UserErrorException(error_message) return self._outputs[name]
(self, name: str) -> mldesigner._input_output.Output
720,196
mldesigner._get_io_context
__init__
null
def __init__(self): self._outputs: typing.Dict[str, Output] = dict()
(self)
720,197
mldesigner._get_io_context
__setattr__
null
def __setattr__(self, name: str, value: str): if name == "_outputs": super(OutputContext, self).__setattr__(name, value) else: # note: we cannot know the Output type now, so specify `string` here; # and we cannot validate value type, either. self._outputs[name] = Output(type="string", early_available=True) # update name and value for later writing run history self._outputs[name]._port_name = name self._outputs[name]._value = value # pylint: disable=protected-access self._outputs[name]._ready = False # pylint: disable=protected-access
(self, name: str, value: str)
720,198
mldesigner._get_root_pipeline_context
PipelineContext
Pipeline context, including root pipeline job name and init and/or execution stage information. You can use `get_root_pipeline_context` to get this during pipeline runtime. :param root_job_name: Root pipeline job name. :type root_job_name: str :param initialization_stage: Initialization stage information. :type initialization_stage: PipelineStage :param execution_stage: Execution stage information. :type execution_stage: PipelineStage
class PipelineContext: """Pipeline context, including root pipeline job name and init and/or execution stage information. You can use `get_root_pipeline_context` to get this during pipeline runtime. :param root_job_name: Root pipeline job name. :type root_job_name: str :param initialization_stage: Initialization stage information. :type initialization_stage: PipelineStage :param execution_stage: Execution stage information. :type execution_stage: PipelineStage """ def __init__( self, root_job_name: str, initialization_stage: typing.Optional[PipelineStage], execution_stage: typing.Optional[PipelineStage], ): self.root_job_name = root_job_name self.stages = { STAGE_INIT: initialization_stage, STAGE_EXECUTION: execution_stage, } @staticmethod def _from_job_properties(properties: typing.Dict) -> "PipelineContext": try: root_job_name = properties["rootRunId"] stages = json.loads(properties["properties"]["azureml.pipelines.stages"]) init_stage = PipelineStage._from_stage(stages.get(STAGE_INIT)) execution_stage = PipelineStage._from_stage(stages.get(STAGE_EXECUTION)) return PipelineContext(root_job_name, init_stage, execution_stage) except (KeyError, json.decoder.JSONDecodeError, ValueError) as e: raise SystemErrorException("Parse pipeline job properties failed.") from e
(root_job_name: str, initialization_stage: Optional[mldesigner._get_root_pipeline_context.PipelineStage], execution_stage: Optional[mldesigner._get_root_pipeline_context.PipelineStage])
720,199
mldesigner._get_root_pipeline_context
__init__
null
def __init__( self, root_job_name: str, initialization_stage: typing.Optional[PipelineStage], execution_stage: typing.Optional[PipelineStage], ): self.root_job_name = root_job_name self.stages = { STAGE_INIT: initialization_stage, STAGE_EXECUTION: execution_stage, }
(self, root_job_name: str, initialization_stage: Optional[mldesigner._get_root_pipeline_context.PipelineStage], execution_stage: Optional[mldesigner._get_root_pipeline_context.PipelineStage])
720,200
mldesigner._get_root_pipeline_context
_from_job_properties
null
@staticmethod def _from_job_properties(properties: typing.Dict) -> "PipelineContext": try: root_job_name = properties["rootRunId"] stages = json.loads(properties["properties"]["azureml.pipelines.stages"]) init_stage = PipelineStage._from_stage(stages.get(STAGE_INIT)) execution_stage = PipelineStage._from_stage(stages.get(STAGE_EXECUTION)) return PipelineContext(root_job_name, init_stage, execution_stage) except (KeyError, json.decoder.JSONDecodeError, ValueError) as e: raise SystemErrorException("Parse pipeline job properties failed.") from e
(properties: Dict) -> mldesigner._get_root_pipeline_context.PipelineContext
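For context, a sketch of the run-history payload shape that `_from_job_properties` parses; all field values are hypothetical, and the exact timestamp layout is an assumption tied to the module's `TIME_FORMAT` constant:

.. code-block:: python

    import json

    properties = {
        "rootRunId": "hypothetical-root-run",
        "properties": {
            "azureml.pipelines.stages": json.dumps({
                "Initialization": {
                    "StartTime": "2023-01-01T00:00:00.123+00:00",
                    "EndTime": "2023-01-01T00:01:00.4567+00:00",
                    "Status": "Completed",
                },
                # "Execution" may be missing; _from_stage then yields None
            }),
        },
    }
    context = PipelineContext._from_job_properties(properties)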
720,201
mldesigner._get_root_pipeline_context
PipelineStage
Pipeline stage, valid stages are "Initialization" and "Execution". :param start_time: Stage start time, and you can get this in the string format of ISO 8601 by calling `pipeline_stage.start_time.isoformat()`. :type start_time: datetime.datetime :param end_time: Stage end time, similar to start_time. :type end_time: datetime.datetime :param status: Stage status. :type status: str
class PipelineStage: """Pipeline stage, valid stages are "Initialization" and "Execution". :param start_time: Stage start time, and you can get this in the string format of ISO 8601 by calling `pipeline_stage.start_time.isoformat()`. :type start_time: datetime.datetime :param end_time: Stage end time, similar to start_time. :type end_time: datetime.datetime :param status: Stage status. :type status: str """ def __init__(self, start_time: str, end_time: str, status: str): self.start_time = self._parse_time(start_time) self.end_time = self._parse_time(end_time) self.status = status @staticmethod def _parse_time(time_string: str) -> datetime.datetime: # %f for 6 digits, but backend may return different digit ms ms_start_index, ms_end_index = time_string.index("."), time_string.index("+") ms = time_string[ms_start_index + 1 : ms_end_index] normalized_ms = ms.ljust(6, "0")[:6] normalized_date_string = f"{time_string[:ms_start_index + 1]}{normalized_ms}{time_string[ms_end_index:]}" return datetime.datetime.strptime(normalized_date_string, TIME_FORMAT) @staticmethod def _from_stage(stage: typing.Optional[typing.Dict[str, str]]) -> typing.Optional["PipelineStage"]: if stage is None: return None return PipelineStage(stage["StartTime"], stage["EndTime"], stage["Status"])
(start_time: str, end_time: str, status: str)
720,202
mldesigner._get_root_pipeline_context
__init__
null
def __init__(self, start_time: str, end_time: str, status: str): self.start_time = self._parse_time(start_time) self.end_time = self._parse_time(end_time) self.status = status
(self, start_time: str, end_time: str, status: str)
720,203
mldesigner._get_root_pipeline_context
_from_stage
null
@staticmethod def _from_stage(stage: typing.Optional[typing.Dict[str, str]]) -> typing.Optional["PipelineStage"]: if stage is None: return None return PipelineStage(stage["StartTime"], stage["EndTime"], stage["Status"])
(stage: Optional[Dict[str, str]]) -> Optional[mldesigner._get_root_pipeline_context.PipelineStage]
720,204
mldesigner._get_root_pipeline_context
_parse_time
null
@staticmethod def _parse_time(time_string: str) -> datetime.datetime: # %f for 6 digits, but backend may return different digit ms ms_start_index, ms_end_index = time_string.index("."), time_string.index("+") ms = time_string[ms_start_index + 1 : ms_end_index] normalized_ms = ms.ljust(6, "0")[:6] normalized_date_string = f"{time_string[:ms_start_index + 1]}{normalized_ms}{time_string[ms_end_index:]}" return datetime.datetime.strptime(normalized_date_string, TIME_FORMAT)
(time_string: str) -> datetime.datetime
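The normalization step pads or truncates the fractional-second digits to exactly six so that `%f` in `strptime` always matches; in isolation:

.. code-block:: python

    # "4567" -> "456700" (padded), "1234567" -> "123456" (truncated)
    for ms in ("4567", "1234567"):
        print(ms.ljust(6, "0")[:6])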
720,205
mldesigner._utils
check_main_package
null
def check_main_package(logger=None): if logger is None: logger = _LoggerFactory.get_logger("mldesigner") version = get_package_version("azure-ai-ml") target_version = "1.2.0" version_to_check = pkg_resources.parse_version(target_version) msg = ( f"Mldesigner requires azure-ai-ml >= {target_version} package to be fully functional. " f"It's highly recommended to install the latest azure-ai-ml package." ) if version: if not version.startswith("0.0."): # public version if pkg_resources.parse_version(version) <= version_to_check: logger.warning(msg) else: logger.warning(msg)
(logger=None)
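The version gate above skips private `0.0.*` builds and warns when azure-ai-ml is missing or a public version is at or below the 1.2.0 target; a condensed sketch of the same predicate:

.. code-block:: python

    import pkg_resources

    def _should_warn(version, target="1.2.0"):
        if not version:
            return True   # azure-ai-ml not installed at all
        if version.startswith("0.0."):
            return False  # private/dev build, not checked
        return pkg_resources.parse_version(version) <= pkg_resources.parse_version(target)

    assert _should_warn("1.2.0") and not _should_warn("1.3.0")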
720,219
mldesigner._component
command_component
Return a decorator which is used to declare a component with @command_component. A component is a reusable unit in an Azure Machine Learning workspace. With the decorator @command_component, a function could be registered as a component in the workspace. Then the component could be used to construct an Azure Machine Learning pipeline. .. note:: The following example shows how to use @command_component to declare a simple component. .. code-block:: python @command_component() def your_component_function(output: Output(), input: Input(), param='str_param'): pass The following example shows how to declare a component with detailed meta data. .. code-block:: python @command_component(name=name, version=version, description=description) def your_component_function(output: Output(), input: Input(), param='str_param'): pass The following example shows how to consume the declared component function with dsl.pipeline. .. code-block:: python from azure.ai.ml import MLClient from azure.ai.ml.dsl import pipeline # define pipeline with mldesigner.command_component function @pipeline() def your_pipeline_func(input, param): your_component_function(input=input, param=param) # create the pipeline pipeline = your_pipeline_func(your_input, 'your_str') # Submit pipeline through MLClient ml_client = MLClient(cred, "my-sub", "my-rg", "my-ws") ml_client.create_or_update(pipeline) :param func: The user component function to be decorated. :type func: types.FunctionType :param name: The name of the component. If None is set, function name is used. :type name: str :param version: Version of the component. :type version: str :param display_name: Display name of the component. :type display_name: str :param description: The description of the component. If None is set, the doc string is used. :type description: str :param is_deterministic: Specify whether the component will always generate the same result. The default value is None, in which case the component will be reused by default (the same as for True). If False, this component will never be reused. :type is_deterministic: bool :param tags: Tags of the component. :type tags: dict :param environment: Environment config of component, could be a yaml file path, a dict or an Environment object. If None, a default conda with 'azure-ai-ml' will be used. :type environment: Union[str, os.PathLike, dict, azure.ai.ml.entities.Environment] :param distribution: The distribution config of component, e.g. distribution={'type': 'mpi'}. :type distribution: Union[dict, PyTorchDistribution, MpiDistribution, TensorFlowDistribution] :param resources: Compute Resource configuration for the component. :type resources: Union[dict, ResourceConfiguration] :param code: The source directory of the component, with default value '.', i.e. the directory of the mldesigner component file. :type code: Union[str, PathLike] :return: The decorated function which could be used to create component directly.
def command_component( func=None, *, name=None, version=None, display_name=None, description=None, is_deterministic=None, tags=None, environment: Union[str, dict, PathLike, "Environment"] = None, distribution: Union[dict, "PyTorchDistribution", "MpiDistribution", "TensorFlowDistribution"] = None, resources: Union[dict, "ResourceConfiguration"] = None, code: Union[str, PathLike] = None, ): """Return a decorator which is used to declare a component with @command_component. A component is a reusable unit in an Azure Machine Learning workspace. With the decorator @command_component, a function could be registered as a component in the workspace. Then the component could be used to construct an Azure Machine Learning pipeline. .. note:: The following example shows how to use @command_component to declare a simple component. .. code-block:: python @command_component() def your_component_function(output: Output(), input: Input(), param='str_param'): pass The following example shows how to declare a component with detailed meta data. .. code-block:: python @command_component(name=name, version=version, description=description) def your_component_function(output: Output(), input: Input(), param='str_param'): pass The following example shows how to consume the declared component function with dsl.pipeline. .. code-block:: python from azure.ai.ml import MLClient from azure.ai.ml.dsl import pipeline # define pipeline with mldesigner.command_component function @pipeline() def your_pipeline_func(input, param): your_component_function(input=input, param=param) # create the pipeline pipeline = your_pipeline_func(your_input, 'your_str') # Submit pipeline through MLClient ml_client = MLClient(cred, "my-sub", "my-rg", "my-ws") ml_client.create_or_update(pipeline) :param func: The user component function to be decorated. :type func: types.FunctionType :param name: The name of the component. If None is set, function name is used. :type name: str :param version: Version of the component. :type version: str :param display_name: Display name of the component. :type display_name: str :param description: The description of the component. If None is set, the doc string is used. :type description: str :param is_deterministic: Specify whether the component will always generate the same result. The default value is None, in which case the component will be reused by default (the same as for True). If False, this component will never be reused. :type is_deterministic: bool :param tags: Tags of the component. :type tags: dict :param environment: Environment config of component, could be a yaml file path, a dict or an Environment object. If None, a default conda with 'azure-ai-ml' will be used. :type environment: Union[str, os.PathLike, dict, azure.ai.ml.entities.Environment] :param distribution: The distribution config of component, e.g. distribution={'type': 'mpi'}. :type distribution: Union[dict, PyTorchDistribution, MpiDistribution, TensorFlowDistribution] :param resources: Compute Resource configuration for the component. :type resources: Union[dict, ResourceConfiguration] :param code: The source directory of the component, with default value '.', i.e. the directory of the mldesigner component file. :type code: Union[str, PathLike] :return: The decorated function which could be used to create component directly. """ _validate_component_name(name=name) # Get the directory of decorator to resolve absolute code path in environment # Note: The decorator defined source directory may be different from mldesigner component source directory. decorator_defined_source_dir = _resolve_source_directory() # If is in mldesigner component execution process, skip resolve file path. EXECUTOR_CLASS = ExecutorBase._get_executor_class() environment = EXECUTOR_CLASS._refine_environment(environment, decorator_defined_source_dir) if code: # Resolve code source immediately if defined with code. code = Path(decorator_defined_source_dir / code).resolve().absolute().as_posix() entity_args = { k: v for k, v in locals().items() if v is not None and k in inspect.signature(command_component).parameters } # func is not necessary for component entity entity_args.pop("func", None) def component_func_decorator(func: _TFunc) -> _TFunc: nonlocal entity_args executor, raw_func, entity_args = _create_executor( EXECUTOR_CLASS=EXECUTOR_CLASS, func=func, code=code, entity_args=entity_args ) _component_func = None @functools.wraps(raw_func) def wrapper(*args, **kwargs): nonlocal _component_func, executor if executor._is_variable_inputs: variable_inputs_executor = copy.copy(executor) variable_inputs_executor, func_kwargs = _update_executor_inputs_by_values( kwargs, raw_func, variable_inputs_executor ) if EXECUTOR_CLASS == ComponentExecutor: # Convert inputs to key-value dict. variable_inputs_dict = ( inspect.signature(variable_inputs_executor._func).bind_partial(*args, **kwargs).arguments ) kwargs_param = next( filter( lambda param: param.kind in [param.VAR_KEYWORD], inspect.signature(variable_inputs_executor._func).parameters.values(), ) ) inputs_kwargs = variable_inputs_dict.pop(kwargs_param.name, {}) variable_inputs_dict.update(inputs_kwargs) variable_inputs_executor._execution_args = dict(variable_inputs_dict) return variable_inputs_executor _variable_inputs_component_func = EXECUTOR_CLASS._get_generate_component_function( variable_inputs_executor.component ) return _variable_inputs_component_func(*args, **func_kwargs) # pylint: disable=not-callable if not _component_func: _component_func = ( # If used in standalone mode, return the executor, otherwise return a component function. executor if EXECUTOR_CLASS == ComponentExecutor else EXECUTOR_CLASS._get_generate_component_function(executor.component) ) return _component_func(*args, **kwargs) wrapper._is_mldesigner_component = True wrapper._executor = executor if EXECUTOR_CLASS != ComponentExecutor: wrapper.component = executor.component return wrapper # enable using decorator without "()" if all arguments are default values if func is not None: return component_func_decorator(func) return component_func_decorator
(func=None, *, name=None, version=None, display_name=None, description=None, is_deterministic=None, tags=None, environment: Union[str, dict, os.PathLike, ForwardRef('Environment')] = None, distribution: Union[dict, ForwardRef('PyTorchDistribution'), ForwardRef('MpiDistribution'), ForwardRef('TensorFlowDistribution')] = None, resources: Union[dict, ForwardRef('ResourceConfiguration')] = None, code: Union[str, os.PathLike] = None)
720,220
mldesigner._compile._compile
compile
Compile sdk-defined components/pipelines to yaml files, or build yaml components/pipelines with snapshot. A component can be defined through sdk using @mldesigner.command_component decorator, and a pipeline can be defined using @dsl.pipeline decorator. Such components or pipelines can be compiled into yaml files with mldesigner compile function. When the input is already a yaml component/pipeline, "output" parameter is required and mldesigner will build a fully-resolved component/pipeline into this specified output folder. :param source: Source file or objects to be compiled. Could be one of the below types: * SDK-defined component/pipeline function: The decorated function object. * File path with suffix '.py' : The file that contains sdk-defined components or pipelines. * File path with suffix '.yaml'(or .yml) : The component/pipeline yaml file. :type source: Union[str, FunctionType] :param name: The name of target component/pipeline to be compiled. :type name: str :param output: The folder in which to put compiled results. * If not specified, compiled files are in the same folder as the source file. * If specified, compiled component with its snapshot are in ./{output_folder}/{component_name}/ :type output: str :param ignore_file: The file path that contains ignore patterns, determines what files will be ignored during compilation. Only supports '.gitignore' and '.amlignore' file. By default, the compilation will use the ignore files in the component code folder. If specified, the specified ignore file will be used COMBINED with original ignore files in the component code folder. :type ignore_file: Union[str, Path] :param debug: Determines whether to show detailed debug information, defaults to false. :type debug: bool
def compile( source: Union[str, FunctionType], *, name=None, output=None, ignore_file=None, debug=False, **kwargs, ): """Compile sdk-defined components/pipelines to yaml files, or build yaml components/pipelines with snapshot. A component can be defined through sdk using @mldesigner.command_component decorator, and a pipeline can be defined using @dsl.pipeline decorator. Such components or pipelines can be compiled into yaml files with mldesigner compile function. When the input is already a yaml component/pipeline, "output" parameter is required and mldesigner will build a fully-resolved component/pipeline into this specified output folder. :param source: Source file or objects to be compiled. Could be one of the below types: * SDK-defined component/pipeline function: The decorated function object. * File path with suffix '.py' : The file that contains sdk-defined components or pipelines. * File path with suffix '.yaml'(or .yml) : The component/pipeline yaml file. :type source: Union[str, FunctionType] :param name: The name of target component/pipeline to be compiled. :type name: str :param output: The folder in which to put compiled results. * If not specified, compiled files are in the same folder as the source file. * If specified, compiled component with its snapshot are in ./{output_folder}/{component_name}/ :type output: str :param ignore_file: The file path that contains ignore patterns, determines what files will be ignored during compilation. Only supports '.gitignore' and '.amlignore' file. By default, the compilation will use the ignore files in the component code folder. If specified, the specified ignore file will be used COMBINED with original ignore files in the component code folder. :type ignore_file: Union[str, Path] :param debug: Determines whether to show detailed debug information, defaults to false. :type debug: bool """ # import locally so generate package interface doesn't depend on azure-ai-ml try: from mldesigner._compile._compile_impl import _compile except ImportError as e: err_msg = ( "Please install compile extra dependencies by running `pip install mldesigner[pipeline]`, " f"currently got {e}" ) raise UserErrorException(err_msg) return _compile(source=source, name=name, output=output, ignore_file=ignore_file, debug=debug, **kwargs)
(source: Union[str, function], *, name=None, output=None, ignore_file=None, debug=False, **kwargs)
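A sketch of typical invocations, assuming `compile` is exposed at the package root and that components live in a local `components.py` (file and component names are hypothetical):

.. code-block:: python

    from mldesigner import compile

    # compile every component/pipeline in the file; output lands next to the source
    compile(source="components.py")

    # compile one named component, with its snapshot, into ./built/train_model/
    compile(source="components.py", name="train_model", output="built")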
720,221
mldesigner._execute._execute
execute
Execute a mldesigner component node. A mldesigner component node can be generated by calling the @command_component decorated function. The necessary inputs and outputs are passed during the calling process. The returned result will be a dictionary that contains the function execution result and output file/folder paths, if defined. .. note:: The following code shows a component function definition. .. code-block:: python @command_component() def train_model( input_data : Input(type="uri_folder"), lr: int, max_epocs = 10, my_output_file: Output(type="uri_file") = None, my_output_folder: Output(type="uri_folder") = None, ): pass Now let's see how to execute the above component .. code-block:: python from mldesigner import execute from xxx import train_model node = train_model(input_data="path0", lr=0.5) res = execute(node) The returned result will be a dict: res -> { "my_output_file" : "{working_directory}/train_model_{timestamp}/my_output_file", "my_output_folder": "{working_directory}/train_model_{timestamp}/my_output_folder", } :param source: The component source file or an executable component node. :type source: Union[str, PathLike, ExecutorBase, "Command", "Parallel"] :return: The returned dict that contains the function execution result and output file/folder paths
def execute(source: Union[ExecutorBase, "Command", "Parallel"]): """Execute a mldesigner component node. A mldesigner component node can be generated by calling the @command_component decorated function. The necessary inputs and outputs are passed during the calling process. The returned result will be a dictionary that contains the function execution result and output file/folder paths, if defined. .. note:: The following code shows a component function definition. .. code-block:: python @command_component() def train_model( input_data : Input(type="uri_folder"), lr: int, max_epocs = 10, my_output_file: Output(type="uri_file") = None, my_output_folder: Output(type="uri_folder") = None, ): pass Now let's see how to execute the above component .. code-block:: python from mldesigner import execute from xxx import train_model node = train_model(input_data="path0", lr=0.5) res = execute(node) The returned result will be a dict: res -> { "my_output_file" : "{working_directory}/train_model_{timestamp}/my_output_file", "my_output_folder": "{working_directory}/train_model_{timestamp}/my_output_folder", } :param source: The component source file or an executable component node. :type source: Union[str, PathLike, ExecutorBase, "Command", "Parallel"] :return: The returned dict that contains the function execution result and output file/folder paths """ return _execute(source)
(source: Union[mldesigner._component_executor.ExecutorBase, ForwardRef('Command'), ForwardRef('Parallel')])
720,222
mldesigner._generate._generate_package
generate
For a set of source assets, generate a python module which contains component consumption functions and import it for use. Supported source types: - components: component consumption functions :param source: List[source_identifier], dict[module_relative_path, List[source_identifier]] or str * None: we will generate a module with ml_client.from_config() if source not specified, not supported for now. * list example: specify as source pattern list and we will generate modules .. code-block:: python # workspace source assets, module name will be workspace name source = ["azureml://subscriptions/{subscription_id}/resourcegroups/{resource_group}/ workspaces/{workspace_name}"] # registry source assets, module name will be registry name source = ["azureml://registries/HuggingFace"] # local source assets, module name will be "components" source = ["components/**/component_spec.yaml"] * dict example: component module name relative path as key and List[source_identifier] as value .. code-block:: python # module name with a source identifier source = {"path/to/component/module": "azureml://subscriptions/{subscription_id}/" "resourcegroups/{resource_group}/workspaces/{workspace_name}"} # module name with a list of source identifier source = {"path/to/component/module": ["azureml://subscriptions/{subscription_id}/" "resourcegroups/{resource_group}/workspaces/{workspace_name}", "components/**/component_spec.yaml"]} * str example: mldesigner.yaml, config file which contains the source dict .. note:: module_relative_path: relative path of the generated component module * When package name is not provided, the component module relative path will be relative to the current folder * When package name is provided, the component module relative path will be relative to the generated package folder components: single or list of glob string which specify a set of components. Example values: * source assets from workspace 1. all source assets ``azureml://subscriptions/{subscription_id}/resource_group/{resource_group}/ workspaces/{workspace_name}`` 2. components with name filter ``azureml://subscriptions/{subscription_id}/resource_group/{resource_group} /workspaces/{workspace_name}/components/microsoft_samples_*`` * components from local yaml ``components/**/component_spec.yaml`` * components from registries For registry concept, please see: `https://aka.ms/azuremlsharing`. azureml://registries/HuggingFace # All source assets in registry HuggingFace. azureml://registries/HuggingFace/components/Microsoft* :type source: typing.Union[list, dict, str] :param package_name: name of the generated python package. Example: cool-component-package * If specified: we generate the module file to the specified package. * If the cool-component-package folder does not exist, we will create a new skeleton package under ./cool-component-package and print info in the command line asking the user to run: ``pip install -e ./cool-component-package`` Then the user can do: 'from cool.component.package import component_func' * If the folder exists, we will try to update component folders inside it. * If not specified, we generate the module directory under the current directory. :type package_name: str :param force_regenerate: whether to force regenerate the python module file. * If True, will always regenerate component folder. * If False, will reuse previous generated file. If the existing file is not valid, an import error will be raised. :type force_regenerate: bool :param kwargs: A dictionary of additional configuration parameters. :type kwargs: dict
def generate( *, source: Union[list, dict, str], package_name: str = None, force_regenerate: bool = False, **kwargs, ) -> None: """For a set of source assets, generate a python module which contains component consumption functions and import it for use. Supported source types: - components: component consumption functions :param source: List[source_identifier], dict[module_relative_path, List[source_identifier]] or str * None: we will generate a module with ml_client.from_config() if source not specified, not supported for now. * list example: specify as source pattern list and we will generate modules .. code-block:: python # workspace source assets, module name will be workspace name source = ["azureml://subscriptions/{subscription_id}/resourcegroups/{resource_group}/ workspaces/{workspace_name}"] # registry source assets, module name will be registry name source = ["azureml://registries/HuggingFace"] # local source assets, module name will be "components" source = ["components/**/component_spec.yaml"] * dict example: component module name relative path as key and List[source_identifier] as value .. code-block:: python # module name with a source identifier source = {"path/to/component/module": "azureml://subscriptions/{subscription_id}/" "resourcegroups/{resource_group}/workspaces/{workspace_name}"} # module name with a list of source identifier source = {"path/to/component/module": ["azureml://subscriptions/{subscription_id}/" "resourcegroups/{resource_group}/workspaces/{workspace_name}", "components/**/component_spec.yaml"]} * str example: mldesigner.yaml, config file which contains the source dict .. note:: module_relative_path: relative path of the generated component module * When package name is not provided, the component module relative path will be relative to the current folder * When package name is provided, the component module relative path will be relative to the generated package folder components: single or list of glob string which specify a set of components. Example values: * source assets from workspace 1. all source assets ``azureml://subscriptions/{subscription_id}/resource_group/{resource_group}/ workspaces/{workspace_name}`` 2. components with name filter ``azureml://subscriptions/{subscription_id}/resource_group/{resource_group} /workspaces/{workspace_name}/components/microsoft_samples_*`` * components from local yaml ``components/**/component_spec.yaml`` * components from registries For registry concept, please see: `https://aka.ms/azuremlsharing`. azureml://registries/HuggingFace # All source assets in registry HuggingFace. azureml://registries/HuggingFace/components/Microsoft* :type source: typing.Union[list, dict, str] :param package_name: name of the generated python package. Example: cool-component-package * If specified: we generate the module file to the specified package. * If the cool-component-package folder does not exist, we will create a new skeleton package under ./cool-component-package and print info in the command line asking the user to run: ``pip install -e ./cool-component-package`` Then the user can do: 'from cool.component.package import component_func' * If the folder exists, we will try to update component folders inside it. * If not specified, we generate the module directory under the current directory. :type package_name: str :param force_regenerate: whether to force regenerate the python module file. * If True, will always regenerate component folder. * If False, will reuse previous generated file. If the existing file is not valid, an import error will be raised. :type force_regenerate: bool :param kwargs: A dictionary of additional configuration parameters. :type kwargs: dict """ # import locally so generate package interface doesn't depend on azure-ai-ml try: from mldesigner._generate._generate_package_impl import _generate except ImportError as e: err_msg = ( "Please install generate extra dependencies by running `pip install mldesigner[pipeline]`, " f"currently got {e}" ) raise UserErrorException(err_msg) return _generate(source=source, package_name=package_name, force_regenerate=force_regenerate, **kwargs)
(*, source: Union[list, dict, str], package_name: Optional[str] = None, force_regenerate: bool = False, **kwargs) -> NoneType
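A sketch of common calls, assuming local YAML components under `components/`; the paths and package name are hypothetical:

.. code-block:: python

    from mldesigner import generate

    # module generated under the current directory, module name "components"
    generate(source=["components/**/component_spec.yaml"])

    # generate into a named skeleton package, installable with `pip install -e`
    generate(
        source=["components/**/component_spec.yaml"],
        package_name="cool-component-package",
        force_regenerate=True,
    )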
720,223
mldesigner._get_io_context
get_io_context
Get `IOContext` that contains component outputs information during runtime. Outputs can be accessed via `.outputs` from `IOContext` object. An early available output can be marked as ready with the code below. .. code-block:: python from mldesigner import get_io_context ctx = get_io_context() ctx.outputs.output = "meta.txt" ctx.outputs.output.ready() # multiple outputs to mark ready # the lines assigning values are omitted ctx.outputs.output1.ready() ctx.outputs.output2.ready()
def get_io_context() -> IOContext: """Get `IOContext` that contains component outputs information during runtime. Outputs can be accessed via `.outputs` from `IOContext` object. An early available output can be marked as ready with the code below. .. code-block:: python from mldesigner import get_io_context ctx = get_io_context() ctx.outputs.output = "meta.txt" ctx.outputs.output.ready() # multiple outputs to mark ready # the lines assigning values are omitted ctx.outputs.output1.ready() ctx.outputs.output2.ready() """ return IOContext()
() -> mldesigner._get_io_context.IOContext
720,224
mldesigner._get_root_pipeline_context
get_root_pipeline_context
Get root pipeline job information, including init/execution stage status and start/end time. Both init and execution stage are optional: for pipeline job without init job, init stage will be None; for pipeline job whose init job fails, execution stage will be None. This function will only work during runtime. .. code-block:: python from mldesigner import get_root_pipeline_context context = get_root_pipeline_context() root_job_name = context.root_job_name print("root pipeline job name:", root_job_name) # stage info, including status, start/end time # note that init_stage and execution_stage can be None in some scenarios init_stage = context.stages["Initialization"] execution_stage = context.stages["Execution"] print("execution stage status:", execution_stage.status) print("execution stage start time:", execution_stage.start_time) print("execution stage end time:", execution_stage.end_time)
def get_root_pipeline_context() -> PipelineContext: """Get root pipeline job information, including init/execution stage status and start/end time. Both init and execution stage are optional: for pipeline job without init job, init stage will be None; for pipeline job whose init job fails, execution stage will be None. This function will only work during runtime. .. code-block:: python from mldesigner import get_root_pipeline_context context = get_root_pipeline_context() root_job_name = context.root_job_name print("root pipeline job name:", root_job_name) # stage info, including status, start/end time # note that init_stage and execution_stage can be None in some scenarios init_stage = context.stages["Initialization"] execution_stage = context.stages["Execution"] print("execution stage status:", execution_stage.status) print("execution stage start time:", execution_stage.start_time) print("execution stage end time:", execution_stage.end_time) """ def _get_root_job_properties() -> typing.Dict: try: import mlflow from mlflow.tracking import MlflowClient from mlflow.utils.rest_utils import http_request except ImportError as e: error_message = "mlflow is required for `get_root_pipeline_context`, please install mlflow first." raise ImportError(error_message) from e def _get_job_properties(_cred, _experiment_id: str, _job_id: str) -> typing.Dict: return http_request( host_creds=_cred, endpoint="/experimentids/{}/runs/{}".format(_experiment_id, _job_id), method="GET", ).json() with mlflow.start_run() as run: client = MlflowClient() # get auth & update host to run history cred = client._tracking_client.store.get_host_creds() cred.host = cred.host.replace( "api.azureml.ms", "experiments.azureml.net", ).replace("mlflow/v1.0", "history/v1.0") # get finalize job properties first to get root job id finalize_job_properties = _get_job_properties(cred, run.info.experiment_id, run.info.run_id) root_job_id = finalize_job_properties["rootRunId"] return _get_job_properties(cred, run.info.experiment_id, root_job_id) return PipelineContext._from_job_properties(_get_root_job_properties())
() -> mldesigner._get_root_pipeline_context.PipelineContext
720,225
mldesigner._reference_component
reference_component
Reference an existing component with a function and return a component node built with given params. The referenced component can be defined with a local yaml file or in remote with name and version. The returned component node type is hinted by the function return annotation and defaults to Command. If the referenced component is a local component, it'll be registered as an anonymous component in the pipeline's workspace. If the referenced component is a workspace component, we assume it has been registered in the pipeline's workspace. If the referenced component is a registry component, it'll still be referenced from the registry in the pipeline. Eg: Both .. code-block:: python @reference_component() def my_func(): ... and .. code-block:: python @reference_component() def my_func() -> Command: ... will return a Command node. .. code-block:: python @reference_component() def my_func() -> Parallel: ... will return a Parallel node. :param path: Path to local component file. :type path: str :param name: Name of component to load. :type name: str :param version: Version of component to load. :type version: str :param registry: Registry of component's source. None means it's not a registry component. :type registry: str :return: Component node. :rtype: Union[Command, Parallel]
def reference_component(path: Union[PathLike, str] = None, name=None, version=None, registry=None, **kwargs) -> _TFunc: """Reference an existing component with a function and return a component node built with given params. The referenced component can be defined with a local yaml file or in remote with name and version. The returned component node type is hinted by the function return annotation and defaults to Command. If the referenced component is a local component, it'll be registered as an anonymous component in the pipeline's workspace. If the referenced component is a workspace component, we assume it has been registered in the pipeline's workspace. If the referenced component is a registry component, it'll still be referenced from the registry in the pipeline. Eg: Both .. code-block:: python @reference_component() def my_func(): ... and .. code-block:: python @reference_component() def my_func() -> Command: ... will return a Command node. .. code-block:: python @reference_component() def my_func() -> Parallel: ... will return a Parallel node. :param path: Path to local component file. :type path: str :param name: Name of component to load. :type name: str :param version: Version of component to load. :type version: str :param registry: Registry of component's source. None means it's not a registry component. :type registry: str :return: Component node. :rtype: Union[Command, Parallel] """ def component_decorator(func: _TFunc) -> _TFunc: @wraps(func) def wrapper(*args, **inner_kwargs): from mldesigner._azure_ai_ml import Command from mldesigner._component_loader import ComponentLoader, ComponentsConfig from mldesigner._exceptions import UserErrorException from mldesigner._generate._generators._constants import COMPONENT_TO_NODE if args: raise UserErrorException( message="`reference_component` wrapped function only accept keyword parameters." ) # handle params case insensitively, raise error when unknown kwargs are passed _assert_arg_valid(inner_kwargs, func.__code__.co_varnames, func_name=func.__name__) # currently component loader only support load 1 component in reference_component # create a component loader only contain 1 component config with function name as key component_loader = ComponentLoader( components_config=ComponentsConfig.create_single_component_config( key=func.__name__, path=path, name=name, version=version, registry=registry ), default_component_loader_config_path=DEFAULT_COMPONENT_LOADER_CONFIG_PATH, ) component = component_loader.load_component(name=func.__name__) if hasattr(component, "type") and component.type in COMPONENT_TO_NODE: node = component(**inner_kwargs) else: result_cls = get_type_hints(func).get("return", Command) # Get component outputs result_properties = get_type_hints(result_cls) outputs = {} if "outputs" in result_properties: from mldesigner._azure_ai_ml import Output outputs_cls = get_type_hints(result_cls)["outputs"] # Get output type by source code. output_types = {k: _convert_internal_type(v) for k, v in _detect_output_types(outputs_cls)} annotation_outputs = get_type_hints(outputs_cls) outputs = {k: Output(type=output_types.get(k, None)) for k, v in annotation_outputs.items()} # supported return annotations, traverse in order # Note: make sure no base node in supported_cls supported_cls = COMPONENT_TO_NODE.values() for cls in supported_cls: if issubclass(result_cls, cls): result_cls = cls if result_cls not in supported_cls: msg = ( f"Return annotation of `reference_component` wrapped function can only be {supported_cls} " f"or its subclass, got {result_cls} instead." ) raise UserErrorException(message=msg) # This node will be init with inner_kwargs and push to pipeline stack node = result_cls(component=component, inputs=inner_kwargs, outputs=outputs, _from_component_func=True) # Update remote component output types if name and version: for output_name, output in node.outputs.items(): if output_name in outputs: # TODO enable update primitive type output output.type = outputs[output_name].type node = component_loader.apply_post_load_func(node=node) return node return wrapper return component_decorator
(path: Union[os.PathLike, str, NoneType] = None, name=None, version=None, registry=None, **kwargs) -> ~_TFunc
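A sketch of referencing a registered component and consuming it in a pipeline; the component name, version, and parameters are hypothetical, and note that the wrapper accepts keyword arguments only:

.. code-block:: python

    from mldesigner import reference_component

    @reference_component(name="train_model", version="1")
    def train_model(training_data=None, learning_rate=0.01):
        ...

    # inside a @dsl.pipeline-decorated function:
    node = train_model(training_data=my_dataset, learning_rate=0.1)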
720,226
ete3.coretype.arraytable
ArrayTable
This object is designed to work with matrix datasets (like microarrays). It allows loading the matrix and easy access to row and column vectors.
class ArrayTable(object): """This object is designed to work with matrix datasets (like microarrays). It allows loading the matrix and easy access to row and column vectors. """ def __repr__(self): return "ArrayTable (%s)" %hex(self.__hash__()) def __str__(self): return str(self.matrix) def __init__(self, matrix_file=None, mtype="float"): self.colNames = [] self.rowNames = [] self.colValues = {} self.rowValues = {} self.matrix = None self.mtype = None # If matrix file is supplied if matrix_file is not None: read_arraytable(matrix_file, \ mtype=mtype, \ arraytable_object = self) def get_row_vector(self,rowname): """ Returns the vector associated to the given row name """ return self.rowValues.get(rowname,None) def get_column_vector(self,colname): """ Returns the vector associated to the given column name """ return self.colValues.get(colname,None) def get_several_column_vectors(self,colnames): """ Returns a list of vectors associated to several column names """ vectors = [self.colValues[cname] for cname in colnames] return numpy.array(vectors) def get_several_row_vectors(self,rownames): """ Returns a list of vectors associated to several row names """ vectors = [self.rowValues[rname] for rname in rownames] return numpy.array(vectors) def remove_column(self,colname): """Removes the given column from the current dataset """ col_value = self.colValues.pop(colname, None) if col_value is not None: new_indexes = list(range(len(self.colNames))) index = self.colNames.index(colname) self.colNames.pop(index) new_indexes.pop(index) newmatrix = self.matrix.swapaxes(0,1) newmatrix = newmatrix[new_indexes].swapaxes(0,1) self._link_names2matrix(newmatrix) def merge_columns(self, groups, grouping_criterion): """ Returns a new ArrayTable object in which columns are merged according to a given criterion. 'groups' argument must be a dictionary in which keys are the new column names, and each value is the list of current column names to be merged. 'grouping_criterion' must be 'min', 'max' or 'mean', and defines how numeric values will be merged. Example: my_groups = {'NewColumn':['column5', 'column6']} new_Array = Array.merge_columns(my_groups, 'max') """ if grouping_criterion == "max": grouping_f = get_max_vector elif grouping_criterion == "min": grouping_f = get_min_vector elif grouping_criterion == "mean": grouping_f = get_mean_vector else: raise ValueError("grouping_criterion not supported. Use max|min|mean ") grouped_array = self.__class__() grouped_matrix = [] colNames = [] alltnames = set([]) for gname,tnames in six.iteritems(groups): all_vectors=[] for tn in tnames: if tn not in self.colValues: raise ValueError(str(tn)+" column not found.") if tn in alltnames: raise ValueError(str(tn)+" duplicated column name for merging") alltnames.add(tn) vector = self.get_column_vector(tn).astype(float) all_vectors.append(vector) # Store the group vector = max expression of all items in group grouped_matrix.append(grouping_f(all_vectors)) # store group name colNames.append(gname) for cname in self.colNames: if cname not in alltnames: grouped_matrix.append(self.get_column_vector(cname)) colNames.append(cname) grouped_array.rowNames= self.rowNames grouped_array.colNames= colNames vmatrix = numpy.array(grouped_matrix).transpose() grouped_array._link_names2matrix(vmatrix) return grouped_array def transpose(self): """ Returns a new ArrayTable in which current matrix is transposed. """ transposedA = self.__class__() transposedM = self.matrix.transpose() transposedA.colNames = list(self.rowNames) transposedA.rowNames = list(self.colNames) transposedA._link_names2matrix(transposedM) # Check that everything is ok # for n in self.colNames: # print self.get_column_vector(n) == transposedA.get_row_vector(n) # for n in self.rowNames: # print self.get_row_vector(n) == transposedA.get_column_vector(n) return transposedA def _link_names2matrix(self, m): """ Synchronize current column and row names to the given matrix""" if len(self.rowNames) != m.shape[0]: raise ValueError("Expecting matrix with %d rows" % m.shape[0]) if len(self.colNames) != m.shape[1]: raise ValueError("Expecting matrix with %d columns" % m.shape[1]) self.matrix = m self.colValues.clear() self.rowValues.clear() # link columns names to vectors i = 0 for colname in self.colNames: self.colValues[colname] = self.matrix[:,i] i+=1 # link row names to vectors i = 0 for rowname in self.rowNames: self.rowValues[rowname] = self.matrix[i,:] i+=1 def write(self, fname, colnames=None): write_arraytable(self, fname, colnames=colnames)
(matrix_file=None, mtype='float')
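A short tour of the API above, assuming a tab-delimited matrix file and row/column names that actually occur in it (file and names are hypothetical):

.. code-block:: python

    from ete3 import ArrayTable

    table = ArrayTable("matrix.tsv")                          # parsed via read_arraytable
    col = table.get_column_vector("sample1")                  # one column as a numpy vector
    rows = table.get_several_row_vectors(["geneA", "geneB"])  # stacked numpy array
    merged = table.merge_columns({"grp": ["sample1", "sample2"]}, "mean")
    table.remove_column("sample3")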
720,227
ete3.coretype.arraytable
__init__
null
def __init__(self, matrix_file=None, mtype="float"): self.colNames = [] self.rowNames = [] self.colValues = {} self.rowValues = {} self.matrix = None self.mtype = None # If matrix file is supplied if matrix_file is not None: read_arraytable(matrix_file, \ mtype=mtype, \ arraytable_object = self)
(self, matrix_file=None, mtype='float')
720,228
ete3.coretype.arraytable
__repr__
null
def __repr__(self): return "ArrayTable (%s)" %hex(self.__hash__())
(self)
720,229
ete3.coretype.arraytable
__str__
null
def __str__(self): return str(self.matrix)
(self)
720,230
ete3.coretype.arraytable
_link_names2matrix
Synchronize current column and row names to the given matrix
def _link_names2matrix(self, m): """ Synchronize current column and row names to the given matrix""" if len(self.rowNames) != m.shape[0]: raise ValueError("Expecting matrix with %d rows" % m.shape[0]) if len(self.colNames) != m.shape[1]: raise ValueError("Expecting matrix with %d columns" % m.shape[1]) self.matrix = m self.colValues.clear() self.rowValues.clear() # link columns names to vectors i = 0 for colname in self.colNames: self.colValues[colname] = self.matrix[:,i] i+=1 # link row names to vectors i = 0 for rowname in self.rowNames: self.rowValues[rowname] = self.matrix[i,:] i+=1
(self, m)
720,231
ete3.coretype.arraytable
get_column_vector
Returns the vector associated to the given column name
def get_column_vector(self,colname): """ Returns the vector associated to the given column name """ return self.colValues.get(colname,None)
(self, colname)
720,232
ete3.coretype.arraytable
get_row_vector
Returns the vector associated to the given row name
def get_row_vector(self,rowname): """ Returns the vector associated to the given row name """ return self.rowValues.get(rowname,None)
(self, rowname)
720,233
ete3.coretype.arraytable
get_several_column_vectors
Returns a list of vectors associated to several column names
def get_several_column_vectors(self,colnames): """ Returns a list of vectors associated to several column names """ vectors = [self.colValues[cname] for cname in colnames] return numpy.array(vectors)
(self, colnames)
720,234
ete3.coretype.arraytable
get_several_row_vectors
Returns a list of vectors associated to several row names
def get_several_row_vectors(self,rownames): """ Returns a list of vectors associated to several row names """ vectors = [self.rowValues[rname] for rname in rownames] return numpy.array(vectors)
(self, rownames)
720,235
ete3.coretype.arraytable
merge_columns
Returns a new ArrayTable object in which columns are merged according to a given criterion. 'groups' argument must be a dictionary in which keys are the new column names, and each value is the list of current column names to be merged. 'grouping_criterion' must be 'min', 'max' or 'mean', and defines how numeric values will be merged. Example: my_groups = {'NewColumn':['column5', 'column6']} new_Array = Array.merge_columns(my_groups, 'max')
def merge_columns(self, groups, grouping_criterion): """ Returns a new ArrayTable object in which columns are merged according to a given criterion. 'groups' argument must be a dictionary in which keys are the new column names, and each value is the list of current column names to be merged. 'grouping_criterion' must be 'min', 'max' or 'mean', and defines how numeric values will be merged. Example: my_groups = {'NewColumn':['column5', 'column6']} new_Array = Array.merge_columns(my_groups, 'max') """ if grouping_criterion == "max": grouping_f = get_max_vector elif grouping_criterion == "min": grouping_f = get_min_vector elif grouping_criterion == "mean": grouping_f = get_mean_vector else: raise ValueError("grouping_criterion not supported. Use max|min|mean ") grouped_array = self.__class__() grouped_matrix = [] colNames = [] alltnames = set([]) for gname,tnames in six.iteritems(groups): all_vectors=[] for tn in tnames: if tn not in self.colValues: raise ValueError(str(tn)+" column not found.") if tn in alltnames: raise ValueError(str(tn)+" duplicated column name for merging") alltnames.add(tn) vector = self.get_column_vector(tn).astype(float) all_vectors.append(vector) # Store the group vector = max expression of all items in group grouped_matrix.append(grouping_f(all_vectors)) # store group name colNames.append(gname) for cname in self.colNames: if cname not in alltnames: grouped_matrix.append(self.get_column_vector(cname)) colNames.append(cname) grouped_array.rowNames= self.rowNames grouped_array.colNames= colNames vmatrix = numpy.array(grouped_matrix).transpose() grouped_array._link_names2matrix(vmatrix) return grouped_array
(self, groups, grouping_criterion)
720,236
ete3.coretype.arraytable
remove_column
Removes the given column from the current dataset
def remove_column(self,colname): """Removes the given column from the current dataset """ col_value = self.colValues.pop(colname, None) if col_value is not None: new_indexes = list(range(len(self.colNames))) index = self.colNames.index(colname) self.colNames.pop(index) new_indexes.pop(index) newmatrix = self.matrix.swapaxes(0,1) newmatrix = newmatrix[new_indexes].swapaxes(0,1) self._link_names2matrix(newmatrix)
(self, colname)
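Unlike merge_columns, this mutates the table in place rather than returning a new object (column name hypothetical):

table.remove_column("condA_rep2")
print("condA_rep2" in table.colNames)  # False; the matrix is rebuilt without it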
720,237
ete3.coretype.arraytable
transpose
Returns a new ArrayTable in which current matrix is transposed.
def transpose(self):
    """Returns a new ArrayTable in which current matrix is transposed."""
    transposedA = self.__class__()
    transposedM = self.matrix.transpose()
    transposedA.colNames = list(self.rowNames)
    transposedA.rowNames = list(self.colNames)
    transposedA._link_names2matrix(transposedM)
    # Sanity check (disabled):
    # for n in self.colNames:
    #     print(self.get_column_vector(n) == transposedA.get_row_vector(n))
    # for n in self.rowNames:
    #     print(self.get_row_vector(n) == transposedA.get_column_vector(n))
    return transposedA
(self)
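A quick check of the row/column swap on the hypothetical table:

flipped = table.transpose()
assert flipped.colNames == list(table.rowNames)
assert flipped.rowNames == list(table.colNames)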
720,238
ete3.coretype.arraytable
write
null
def write(self, fname, colnames=None):
    # Delegate to the module-level writer; colnames optionally restricts the dump
    write_arraytable(self, fname, colnames=colnames)
(self, fname, colnames=None)
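Usage is a one-liner; file names and the column subset are hypothetical:

table.write("full_table.tsv")                         # every column
table.write("subset.tsv", colnames=["col1", "col2"])  # selected columns only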
720,239
ete3.clustering.clustertree
ClusterNode
Creates a new Cluster Tree object, which is a collection of ClusterNode instances connected in a hierarchical way, and representing a clustering result. A newick file or string can be passed as the first argument. An ArrayTable file or instance can be passed as a second argument. Examples: t1 = Tree() # creates an empty tree t2 = Tree( '(A:1,(B:1,(C:1,D:1):0.5):0.5);' ) t3 = Tree( '/home/user/myNewickFile.txt' )
class ClusterNode(TreeNode):
    """Creates a new Cluster Tree object, which is a collection of
    ClusterNode instances connected in a hierarchical way, and
    representing a clustering result.

    A newick file or string can be passed as the first argument. An
    ArrayTable file or instance can be passed as a second argument.

    Examples:
        t1 = Tree()  # creates an empty tree
        t2 = Tree('(A:1,(B:1,(C:1,D:1):0.5):0.5);')
        t3 = Tree('/home/user/myNewickFile.txt')
    """

    def _set_forbidden(self, value):
        raise ValueError("This attribute can not be manually set.")

    def _get_intra(self):
        if self._silhouette is None:
            self.get_silhouette()
        return self._intracluster_dist

    def _get_inter(self):
        if self._silhouette is None:
            self.get_silhouette()
        return self._intercluster_dist

    def _get_silh(self):
        if self._silhouette is None:
            self.get_silhouette()
        return self._silhouette

    def _get_prof(self):
        if self._profile is None:
            self._calculate_avg_profile()
        return self._profile

    def _get_std(self):
        if self._std_profile is None:
            self._calculate_avg_profile()
        return self._std_profile

    def _set_profile(self, value):
        self._profile = value

    intracluster_dist = property(fget=_get_intra, fset=_set_forbidden)
    intercluster_dist = property(fget=_get_inter, fset=_set_forbidden)
    silhouette = property(fget=_get_silh, fset=_set_forbidden)
    profile = property(fget=_get_prof, fset=_set_profile)
    deviation = property(fget=_get_std, fset=_set_forbidden)

    def __init__(self, newick=None, text_array=None,
                 fdist=clustvalidation.default_dist):
        # Default dist is spearman_dist when the scipy module is loaded;
        # otherwise, it is set to euclidean_dist.

        # Initialize basic tree features and load the newick (if any)
        TreeNode.__init__(self, newick)
        self._fdist = None
        self._silhouette = None
        self._intercluster_dist = None
        self._intracluster_dist = None
        self._profile = None
        self._std_profile = None

        # Cluster-specific features
        self.features.add("intercluster_dist")
        self.features.add("intracluster_dist")
        self.features.add("silhouette")
        self.features.add("profile")
        self.features.add("deviation")

        # Initialize tree with array data
        if text_array:
            self.link_to_arraytable(text_array)

        if newick:
            self.set_distance_function(fdist)

    def __repr__(self):
        return "ClusterTree node (%s)" % hex(self.__hash__())

    def set_distance_function(self, fn):
        """Sets the distance function used to calculate cluster
        distances and silhouette index.

        ARGUMENTS:
            fn: a pointer to a python function accepting two arrays
            (numpy) as arguments.

        EXAMPLE:
            # A simple euclidean distance
            my_dist_fn = lambda x, y: abs(x - y)
            tree.set_distance_function(my_dist_fn)
        """
        for n in self.traverse():
            n._fdist = fn
            n._silhouette = None
            n._intercluster_dist = None
            n._intracluster_dist = None

    def link_to_arraytable(self, arraytbl):
        """Allows to link a given arraytable object to the tree
        structure under this node. Row names in the arraytable object
        are expected to match leaf names.

        Returns a list of nodes for which profiles could not be found
        in the arraytable.
        """
        # Initialize tree with array data
        if type(arraytbl) == ArrayTable:
            array = arraytbl
        else:
            array = ArrayTable(arraytbl)

        missing_leaves = []
        matrix_values = [i for r in range(len(array.matrix))
                         for i in array.matrix[r] if numpy.isfinite(i)]
        array._matrix_min = min(matrix_values)
        array._matrix_max = max(matrix_values)
        for n in self.traverse():
            n.arraytable = array
            if n.is_leaf() and n.name in array.rowNames:
                n._profile = array.get_row_vector(n.name)
            elif n.is_leaf():
                n._profile = [numpy.nan] * len(array.colNames)
                missing_leaves.append(n)

        if len(missing_leaves) > 0:
            print("[%d] leaf names could not be mapped to the matrix rows." %
                  len(missing_leaves), file=stderr)

        self.arraytable = array

    def iter_leaf_profiles(self):
        """Returns an iterator over all the profiles associated to the
        leaves under this node."""
        for l in self.iter_leaves():
            yield l.get_profile()[0]

    def get_leaf_profiles(self):
        """Returns the list of all the profiles associated to the
        leaves under this node."""
        return [l.get_profile()[0] for l in self.iter_leaves()]

    def get_silhouette(self, fdist=None):
        """Calculates the node's silhouette value by using a given
        distance function. By default, euclidean distance is used. It
        also calculates the deviation profile, mean profile, and
        inter/intra-cluster distances.

        It sets the following features into the analyzed node:
            - node.intracluster
            - node.intercluster
            - node.silhouette

        Intracluster distances a(i) are calculated as the centroid
        diameter. Intercluster distances b(i) are calculated as the
        centroid linkage distance.

        ** Rousseeuw, P.J. (1987) Silhouettes: A graphical aid to the
        interpretation and validation of cluster analysis.
        J. Comput. Appl. Math., 20, 53-65.
        """
        if fdist is None:
            fdist = self._fdist

        # Update internal values
        self._silhouette, self._intracluster_dist, self._intercluster_dist = \
            clustvalidation.get_silhouette_width(fdist, self)
        # And return them
        return self._silhouette, self._intracluster_dist, self._intercluster_dist

    def get_dunn(self, clusters, fdist=None):
        """Calculates the Dunn index for the given set of descendant
        nodes."""
        if fdist is None:
            fdist = self._fdist
        nodes = _translate_nodes(self, *clusters)
        return clustvalidation.get_dunn_index(fdist, *nodes)

    def _calculate_avg_profile(self):
        """This internal function updates the mean profile associated
        to an internal node."""
        self._profile, self._std_profile = clustvalidation.get_avg_profile(self)
(newick=None, text_array=None, fdist=<function spearman_dist>)
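A sketch of building a small cluster tree; the newick, matrix values, and names are all hypothetical, and silhouette values depend on the distance function in use:

from ete3 import ClusterTree  # ClusterTree is the public alias of ClusterNode

# Leaf names in the newick must match row names in the profile matrix
matrix = ("#Names\tcol1\tcol2\tcol3\n"
          "A\t-1.2\t0.1\t2.0\n"
          "B\t-0.9\t0.3\t1.8\n"
          "C\t1.5\t-0.2\t-1.1\n")
t = ClusterTree("((A,B),C);", text_array=matrix)
cluster = t.get_common_ancestor("A", "B")
print(cluster.silhouette)  # computed lazily through the property machinery above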
720,240
ete3.coretype.tree
__add__
This allows to sum two trees.
def __add__(self, value):
    """This allows to sum two trees."""
    # Should I make the sum with two copies of the original trees?
    if type(value) == self.__class__:
        new_root = self.__class__()
        new_root.add_child(self)
        new_root.add_child(value)
        return new_root
    else:
        raise TreeError("Invalid node type")
(self, value)
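Note that no copies are made: both operands become children of the new root. A quick sketch:

from ete3 import Tree

t1 = Tree("(A,B);")
t2 = Tree("(C,D);")
joined = t1 + t2          # a new root with t1 and t2 as its children
print(joined.get_ascii())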
720,241
ete3.coretype.tree
__and__
This allows to execute tree&'A' to obtain the descendant node whose name is A
def __and__(self, value):
    """This allows to execute tree&'A' to obtain the descendant node
    whose name is A."""
    value = str(value)
    try:
        first_match = next(self.iter_search_nodes(name=value))
        return first_match
    except StopIteration:
        raise TreeError("Node not found")
(self, value)
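A short sketch of the operator; a name with no match raises TreeError:

from ete3 import Tree

t = Tree("((A,B),C);")
node_a = t & "A"      # first node whose name is 'A'
print(node_a.name)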
720,242
ete3.coretype.tree
__bool__
Python3's equivalent of __nonzero__. If this is not defined, bool(class_instance) will call __len__ in python3.
def __bool__(self):
    """Python3's equivalent of __nonzero__. If this is not defined,
    bool(class_instance) will call __len__ in python3.
    """
    return True
(self)
720,243
ete3.coretype.tree
__contains__
Check if item belongs to this node. The 'item' argument must be a node instance or its associated name.
def __contains__(self, item):
    """Check if item belongs to this node. The 'item' argument must be
    a node instance or its associated name."""
    if isinstance(item, self.__class__):
        return item in set(self.get_descendants())
    elif type(item) == str:
        return item in set([n.name for n in self.traverse()])
    # Note: any other item type falls through and returns None (falsy)
(self, item)
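Both lookup modes in one sketch (names hypothetical):

from ete3 import Tree

t = Tree("((A,B),C);")
print("A" in t)          # True: string lookup scans every node name
print((t & "A") in t)    # True: node lookup scans the descendants of t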