project: connector_plain_list_project_id: [] created_at: '2022-09-15T11:08:45.173759+00:00' created_by_identity: identity_type: USER name: stefan@cloudomation.com deleted_at: null deleted_by_identity: null description: Cloudomation contains functionality to gather information about remote systems and run common procedures on them. extension_id: null file_plain_list_project_id: [] flow_plain_list_project_id: - name: analysis.sqlpg.action.vacuum script: "import datetime\nimport textwrap\nimport flow_api\n\n\ndef handler(system:\ \ flow_api.System, this: flow_api.Execution):\n inputs = this.get('input_value')\n\ \ connector_id = inputs['id']\n connector = system.connector(connector_id,\ \ by='id')\n connector_name = connector.get('name')\n\n this.connect(\n\ \ connector_name,\n name='vacuum',\n execute='VACUUM',\n\ \ )\n\n return this.success('all done')" - name: analysis.sqlpg.action.create-vacuum-schedule script: "import datetime\nimport textwrap\nimport flow_api\n\n\ndef handler(system:\ \ flow_api.System, this: flow_api.Execution):\n inputs = this.get('input_value')\n\ \ connector_id = inputs['id']\n connector = system.connector(connector_id,\ \ by='id')\n connector_name = connector.get('name')\n\n response = this.message(\n\ \ subject=f'Vacuum schedule of {connector_name}',\n body={\n \ \ 'type': 'object',\n 'properties': {\n \ \ 'info': {\n 'element': 'markdown',\n \ \ 'type': 'string',\n 'order': 1,\n 'docs':\ \ textwrap.dedent(\n f'''\n Create\ \ a new schedule which periodically runs VACUUM in the PostgreSQL database {connector_name}\n\ \ '''\n ),\n },\n \ \ 'schedule_name': {\n 'element': 'string',\n\ \ 'type': 'string',\n 'order': 2,\n \ \ 'default': f'Periodic VACUUM of {connector_name}',\n \ \ 'label': 'Name for the schedule',\n },\n \ \ 'time': {\n 'element': 'time',\n \ \ 'type': 'string',\n 'example': '08:15',\n \ \ 'order': 3,\n 'label': 'Time when the VACUUM\ \ process should be started every day',\n },\n \ \ 'submit-button': {\n 'element': 'submit',\n \ \ 'type': 'boolean',\n 'order': 4,\n \ \ 'label': 'Start schedule',\n },\n },\n \ \ 'required': [\n 'schedule_name',\n \ \ 'time',\n ],\n },\n ).get('response')\n\n schedule_name\ \ = response['schedule_name']\n\n setting_id = system.setting(f'{schedule_name}-setting').save(\n\ \ value={\n 'config': {\n 'time': response['time'],\n\ \ 'timezone': response['cloudomation_fe_timezone'],\n \ \ 'input_value': {\n 'id': connector_id,\n \ \ },\n }\n }\n ).get('id')\n\n system.schedule(schedule_name).save(\n\ \ flow_id=system.flow('analysis.sqlpg.action.vacuum').get('id'),\n \ \ setting_id=setting_id,\n scheduler_id=system.scheduler('daily').get('id'),\n\ \ is_enabled=True,\n )\n\n return this.success('all done')" - name: analysis.sqlpg.action.vacuum-full script: "import datetime\nimport textwrap\nimport flow_api\n\n\ndef handler(system:\ \ flow_api.System, this: flow_api.Execution):\n inputs = this.get('input_value')\n\ \ connector_id = inputs['id']\n connector = system.connector(connector_id,\ \ by='id')\n connector_name = connector.get('name')\n\n this.connect(\n\ \ connector_name,\n name='vacuum full',\n execute='VACUUM\ \ FULL',\n )\n\n return this.success('all done')" - name: analysis.ssh.action.create-diskspace-monitoring-schedule script: "import datetime\nimport textwrap\nimport flow_api\n\n\ndef handler(system:\ \ flow_api.System, this: flow_api.Execution):\n inputs = this.get('input_value')\n\ \ connector_id = inputs['id']\n connector = system.connector(connector_id,\ \ by='id')\n connector_name = connector.get('name')\n\n 
response = this.message(\n\ \ subject=f'Diskspace monitoring of {connector_name}',\n body={\n\ \ 'type': 'object',\n 'properties': {\n \ \ 'info': {\n 'element': 'markdown',\n \ \ 'type': 'string',\n 'order': 1,\n \ \ 'docs': textwrap.dedent(\n f'''\n \ \ Create a new schedule which analyzes disk usage of {connector_name}\ \ and notifies Cloudomation users.\n '''\n \ \ ),\n },\n 'schedule_name': {\n \ \ 'element': 'string',\n 'type': 'string',\n\ \ 'order': 2,\n 'default': f'Diskspace\ \ monitoring of {connector_name}',\n 'label': 'Name for the\ \ schedule',\n },\n 'interval_minutes': {\n \ \ 'element': 'number',\n 'type': 'number',\n\ \ 'example': '60',\n 'order': 3,\n \ \ 'label': 'Interval of recurring execution in minutes',\n \ \ },\n 'max_use_percent': {\n \ \ 'element': 'number',\n 'type': 'number',\n \ \ 'order': 4,\n 'default': 90,\n \ \ 'label': 'Maximum disk usage percentage before sending out notifications',\n\ \ },\n 'min_available_gib': {\n \ \ 'element': 'number',\n 'type': 'number',\n \ \ 'order': 5,\n 'default': 10,\n \ \ 'label': 'Minimum available GiBs before sending out notifications',\n\ \ },\n 'mountpoints': {\n 'element':\ \ 'string',\n 'type': 'string',\n 'order':\ \ 6,\n 'example': '/,/home,/etc,/var',\n \ \ 'label': 'Comma separated list of mountpoints which are monitored. If omitted,\ \ all mountpoints will be monitored.',\n },\n \ \ 'notification_recipients': {\n 'element': 'string',\n \ \ 'type': 'string',\n 'order': 7,\n \ \ 'default': system.get_own_user().get('name'),\n \ \ 'label': 'Comma separated list of Cloudomation users which should\ \ receive notifications',\n },\n 'submit-button':\ \ {\n 'element': 'submit',\n 'type': 'boolean',\n\ \ 'order': 8,\n 'label': 'Start monitoring\ \ schedule',\n },\n },\n 'required': [\n\ \ 'schedule_name',\n 'interval_minutes',\n \ \ 'max_use_percent',\n 'min_available_gib',\n \ \ 'notification_recipients',\n ],\n },\n ).get('response')\n\ \n schedule_name = response['schedule_name']\n mountpoints = response.get('mountpoints')\n\ \ if mountpoints is not None:\n mountpoints = [m.strip() for m in\ \ mountpoints.split(',')]\n notification_recipients = [r.strip() for r in\ \ response['notification_recipients'].split(',')]\n\n setting_id = system.setting(f'{schedule_name}-setting').save(\n\ \ value={\n 'config': {\n 'interval_seconds':\ \ int(response['interval_minutes']) * 60,\n 'timezone': response['cloudomation_fe_timezone'],\n\ \ 'input_value': {\n 'connector_id': connector_id,\n\ \ 'max_use_percent': int(response['max_use_percent']),\n\ \ 'min_available_gib': int(response['min_available_gib']),\n\ \ 'mountpoints': mountpoints,\n 'notification_recipients':\ \ notification_recipients,\n },\n },\n },\n\ \ ).get('id')\n\n system.schedule(schedule_name).save(\n flow_id=system.flow('analysis.ssh.action.handler.diskspace-monitoring').get('id'),\n\ \ setting_id=setting_id,\n scheduler_id=system.scheduler('recurring').get('id'),\n\ \ is_enabled=True,\n )\n\n return this.success('all done')" - name: analysis.sqlpg.action.create-vacuum-full-schedule script: "import datetime\nimport textwrap\nimport flow_api\n\n\ndef handler(system:\ \ flow_api.System, this: flow_api.Execution):\n inputs = this.get('input_value')\n\ \ connector_id = inputs['id']\n connector = system.connector(connector_id,\ \ by='id')\n connector_name = connector.get('name')\n\n response = this.message(\n\ \ subject=f'Vacuum full schedule of {connector_name}',\n body={\n\ \ 'type': 'object',\n 'properties': {\n \ \ 'info': {\n 'element': 'markdown',\n \ \ 'type': 
'string',\n 'order': 1,\n \ \ 'docs': textwrap.dedent(\n f'''\n \ \ Create a new schedule which periodically runs VACUUM FULL in the PostgreSQL\ \ database {connector_name}\n '''\n \ \ ),\n },\n 'schedule_name': {\n \ \ 'element': 'string',\n 'type': 'string',\n \ \ 'order': 2,\n 'default': f'Periodic VACUUM\ \ FULL of {connector_name}',\n 'label': 'Name for the schedule',\n\ \ },\n 'time': {\n 'element':\ \ 'time',\n 'type': 'string',\n 'example':\ \ '08:15',\n 'order': 3,\n 'label': 'Time\ \ when the VACUUM FULL process should be started every day',\n \ \ },\n 'submit-button': {\n 'element': 'submit',\n\ \ 'type': 'boolean',\n 'order': 4,\n \ \ 'label': 'Start schedule',\n },\n \ \ },\n 'required': [\n 'schedule_name',\n \ \ 'time',\n ],\n },\n ).get('response')\n\n\ \ schedule_name = response['schedule_name']\n\n setting_id = system.setting(f'{schedule_name}-setting').save(\n\ \ value={\n 'config': {\n 'time': response['time'],\n\ \ 'timezone': response['cloudomation_fe_timezone'],\n \ \ 'input_value': {\n 'id': connector_id,\n \ \ },\n }\n }\n ).get('id')\n\n system.schedule(schedule_name).save(\n\ \ flow_id=system.flow('analysis.sqlpg.action.vacuum-full').get('id'),\n\ \ setting_id=setting_id,\n scheduler_id=system.scheduler('daily').get('id'),\n\ \ is_enabled=True,\n )\n\n return this.success('all done')" - name: analysis.sqlpg.action.create-schema-monitoring-schedule script: "import datetime\nimport textwrap\nimport flow_api\n\n\ndef handler(system:\ \ flow_api.System, this: flow_api.Execution):\n inputs = this.get('input_value')\n\ \ connector_id = inputs['id']\n connector = system.connector(connector_id,\ \ by='id')\n connector_name = connector.get('name')\n\n response = this.message(\n\ \ subject=f'Monitor schema changes of {connector_name}',\n body={\n\ \ 'type': 'object',\n 'properties': {\n \ \ 'info': {\n 'element': 'markdown',\n \ \ 'type': 'string',\n 'order': 1,\n \ \ 'docs': textwrap.dedent(\n f'''\n \ \ Create a new schedule which periodically checks for changes in the schema\ \ of {connector_name}\n '''\n ),\n\ \ },\n 'schedule_name': {\n \ \ 'element': 'string',\n 'type': 'string',\n \ \ 'order': 2,\n 'default': f'Monitor schema of {connector_name}',\n\ \ 'label': 'Name for the schedule',\n },\n\ \ 'time': {\n 'element': 'time',\n \ \ 'type': 'string',\n 'example': '08:15',\n \ \ 'order': 3,\n 'label': 'Time when the\ \ schema should be checked every day',\n },\n \ \ 'notification_recipients': {\n 'element': 'string',\n \ \ 'type': 'string',\n 'order': 4,\n \ \ 'default': system.get_own_user().get('name'),\n \ \ 'label': 'Comma separated list of Cloudomation users which should\ \ receive notifications',\n },\n 'submit-button':\ \ {\n 'element': 'submit',\n 'type': 'boolean',\n\ \ 'order': 5,\n 'label': 'Start schedule',\n\ \ },\n },\n 'required': [\n \ \ 'schedule_name',\n 'time',\n 'notification_recipients',\n\ \ ],\n },\n ).get('response')\n\n schedule_name = response['schedule_name']\n\ \ notification_recipients = [r.strip() for r in response['notification_recipients'].split(',')]\n\ \n setting_id = system.setting(f'{schedule_name}-setting').save(\n \ \ value={\n 'config': {\n 'time': response['time'],\n\ \ 'timezone': response['cloudomation_fe_timezone'],\n \ \ 'input_value': {\n 'connector_id': connector_id,\n\ \ 'notification_recipients': notification_recipients,\n \ \ },\n },\n },\n ).get('id')\n\n system.schedule(schedule_name).save(\n\ \ flow_id=system.flow('analysis.sqlpg.action.handler.schema-monitoring').get('id'),\n\ \ setting_id=setting_id,\n 
scheduler_id=system.scheduler('daily').get('id'),\n\ \ is_enabled=True,\n )\n\n return this.success('all done')" - name: analysis.sqlpg script: "import datetime\nimport flow_api\n\n\ndef handler(system: flow_api.System,\ \ this: flow_api.Execution):\n inputs = this.get('input_value')\n connector_id\ \ = inputs['id']\n connector = system.connector(connector_id, by='id')\n\ \ connector_name = connector.get('name')\n\n server_version = this.connect(\n\ \ connector_name,\n name='analysis: server_version',\n \ \ fetchval=(\n '''\n SELECT version()\n '''\n\ \ ),\n ).get('output_value')['result']\n\n tables = this.connect(\n\ \ connector_name,\n name='analysis: tables',\n fetch=(\n\ \ '''\n SELECT\n tablename,\n \ \ schemaname,\n tableowner,\n tablespace\n\ \ FROM pg_catalog.pg_tables\n WHERE schemaname != 'pg_catalog'\n\ \ AND schemaname != 'information_schema'\n '''\n \ \ ),\n ).get('output_value')['result']\n\n for table in tables:\n \ \ table_name = table['tablename']\n table_schema = table['schemaname']\n\ \ table['columns'] = this.connect(\n connector_name,\n \ \ name=f'analysis: table columns {table_name}',\n fetch=(\n\ \ f'''\n SELECT\n data_type,\n\ \ column_name,\n is_identity,\n \ \ is_nullable,\n column_default,\n \ \ character_maximum_length\n FROM information_schema.columns\n\ \ WHERE table_schema = '{table_schema}'\n AND\ \ table_name = '{table_name}'\n '''\n ),\n \ \ ).get('output_value')['result']\n\n analysis_data = {\n 'server_version':\ \ server_version,\n 'tables': tables,\n }\n now = datetime.datetime.now(datetime.timezone.utc)\n\ \ connector.save(\n analysis_data=analysis_data,\n last_analysis_at=now.isoformat(),\n\ \ )\n this.set_output(\n analysis_data=analysis_data,\n \ \ last_analysis_at=now.isoformat(),\n )\n\n return this.success('all done')" - name: analysis.rest script: "import textwrap\nimport datetime\nimport yarl\nimport flow_api\n\n\n\ def handler(system: flow_api.System, this: flow_api.Execution):\n inputs\ \ = this.get('input_value')\n connector_id = inputs['id']\n connector\ \ = system.connector(connector_id, by='id')\n connector_name, connector_value\ \ = connector.get('name', 'value')\n\n openapi_locations = []\n\n # when\ \ using a connector with an URL input, split it up\n if 'url' in connector_value:\n\ \ url = yarl.URL(connector_value.pop('url'), encoded=True)\n connector_value.update({\n\ \ 'scheme': url.scheme,\n 'hostname': url.host,\n \ \ })\n\n # detect JIRA cloud which uses non-standard location\n if\ \ '.atlassian.net' in connector_value.get('hostname'):\n openapi_locations.append({\n\ \ 'hostname': 'developer.atlassian.com',\n 'base_path':\ \ '/cloud/jira/platform',\n 'path': 'swagger-v3.v3.json',\n \ \ })\n else:\n # try commonly used openapi locations\n for\ \ filename in (\n 'openapi.json',\n 'openapi.yaml',\n\ \ 'swagger.json',\n ):\n if 'base_path' in\ \ connector_value:\n openapi_locations.append({\n \ \ **connector_value,\n 'path': filename\n \ \ })\n openapi_locations.extend([\n {\n \ \ **connector_value,\n 'base_path': None,\n\ \ 'path': filename\n },\n {\n\ \ **connector_value,\n 'base_path': '/api/latest',\n\ \ 'path': filename\n },\n {\n\ \ **connector_value,\n 'base_path': '/api-docs',\n\ \ 'path': filename\n },\n ])\n\n\ \ for i, kwa in enumerate(openapi_locations, start=1):\n try:\n \ \ response = this.connect(\n connector_name,\n \ \ name=f'fetch OpenAPI spec try #{i}',\n **{\n \ \ **kwa,\n 'method': 'get',\n \ \ },\n ).get('output_value')\n except flow_api.DependencyFailedError:\n\ \ continue\n if 'json' in response:\n 
openapi_dict\ \ = response['json']\n break\n else:\n this.save(message='did\ \ not automatically find the OpenAPI specification file. Please provide the\ \ path to openapi.json')\n openapi_url = this.message(\n subject=f'Path\ \ to OpenAPI specification of {connector_name}',\n body={\n \ \ 'type': 'object',\n 'properties': {\n \ \ 'info': {\n 'element': 'markdown',\n \ \ 'type': 'string',\n 'order': 1,\n\ \ 'docs': textwrap.dedent(\n \ \ f'''\n Please provide the full path to the OpenAPI\ \ specification file of {connector_name}.\n '''\n\ \ ),\n },\n 'openapi_url':\ \ {\n 'element': 'string',\n 'type':\ \ 'string',\n 'order': 2,\n 'example':\ \ f\"{connector_value['hostname']}/api/latest/openapi.json\",\n \ \ 'default': f\"{connector_value['hostname']}/api/latest/openapi.json\"\ ,\n 'label': f\"URL to openapi.json of {connector_value['hostname']}\"\ ,\n },\n 'submit-button': {\n \ \ 'element': 'submit',\n 'type': 'boolean',\n\ \ 'order': 3,\n 'label': 'Analyze',\n\ \ },\n },\n 'required': [\n\ \ 'openapi_url',\n ],\n },\n \ \ wait_timeout=3600\n ).get('response')['openapi_url']\n \ \ kwa = {\n **connector_value,\n 'url': openapi_url,\n\ \ 'hostname': None,\n 'method': 'get',\n }\n \ \ try:\n response = this.connect(\n connector_name,\n\ \ name='fetch OpenAPI spec',\n **kwa,\n \ \ ).get('output_value')\n except flow_api.DependencyFailedError\ \ as ex:\n raise flow_api.DependencyFailedError('did not find an\ \ OpenAPI specification') from ex\n if 'json' in response:\n \ \ openapi_dict = response['json']\n else:\n return this.error(f\"\ did not find an OpenAPI specification at {openapi_url}\")\n\n servers = openapi_dict.get('servers',[])\n\ \ if servers:\n server = servers[0].get('url')\n api_paths = openapi_dict.get('paths')\n\ \ components = openapi_dict.get('components')\n result = {}\n for api_path,\ \ method in api_paths.items():\n response_content = None\n if\ \ method.get('get', {}).get('responses', {}).get('200'):\n response_content\ \ = method.get('get').get('responses').get('200').get('content')\n if\ \ response_content:\n for content_type, schema in response_content.items():\n\ \ if content_type == 'application/json':\n \ \ props = get_schema_properties(schema.get('schema'), components)\n \ \ if not api_path or not props:\n continue\n\ \ result[api_path] = props\n\n analysis_data = {\n \ \ 'server': server,\n 'get_endpoints': result,\n }\n now =\ \ datetime.datetime.now(datetime.timezone.utc)\n connector.save(\n \ \ analysis_data=analysis_data,\n last_analysis_at=now.isoformat(),\n\ \ )\n this.set_output(\n analysis_data=analysis_data,\n \ \ last_analysis_at=now.isoformat(),\n )\n\n return this.success('all done')\n\ \n\ndef get_schema_properties(schema, components):\n if schema.get('$ref'):\n\ \ parts = schema['$ref'].split('/')\n return components[parts[2]][parts[3]].get('properties')\n\ \ return schema.get('properties')" - name: analysis.ssh.action.handler.diskspace-monitoring script: "import datetime\nimport flow_api\n\n\ndef handler(system: flow_api.System,\ \ this: flow_api.Execution):\n inputs = this.get('input_value')\n connector_id\ \ = inputs['connector_id']\n max_use_percent = inputs['max_use_percent']\n\ \ min_available_gib = inputs['min_available_gib']\n mountpoints = inputs['mountpoints']\n\ \ notification_recipients = inputs['notification_recipients']\n connector\ \ = system.connector(connector_id, by='id')\n connector_name, last_analysis_at,\ \ analysis_data = connector.get('name', 'last_analysis_at', 'analysis_data')\n\ \n if (\n last_analysis_at is None or\n 
datetime.datetime.fromisoformat(last_analysis_at)\ \ < (\n datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(minutes=10)\n\ \ )\n ):\n # we do not have analysis results from within\ \ the last 10 minutes\n # let's collect fresh analysis data\n \ \ analysis_data = this.flow(\n 'analysis.ssh',\n id=connector_id\n\ \ ).get('output_value')['analysis_data']\n\n notifications = []\n\ \ for disk_info in analysis_data['device_infos']['disks']:\n if mountpoints\ \ is not None and disk_info['mountpoint'] not in mountpoints:\n continue\n\ \ if disk_info['use_percent'] > max_use_percent:\n notifications.append(f\"\ disk {disk_info['filesystem']} mounted on {disk_info['mountpoint']} usage {disk_info['use_percent']}%\ \ > {max_use_percent}%\")\n if disk_info['available_bytes'] < min_available_gib\ \ * 1024 * 1024 * 1024:\n notifications.append(f\"disk {disk_info['filesystem']}\ \ mounted on {disk_info['mountpoint']} available {disk_info['available_bytes']//1024//1024//1024}\ \ GiB ({disk_info['available_bytes']} bytes) < {min_available_gib} GiB ({min_available_gib*1024*1024*1024}\ \ bytes)\")\n\n if notifications:\n for notification_recipient in\ \ notification_recipients:\n system.user(notification_recipient).send_mail(\n\ \ subject=f'Diskspace monitoring alert: {connector_name}',\n\ \ text='\\n'.join(notifications),\n )\n return\ \ this.success('sent monitoring alert')\n\n return this.success('all done')" - name: analysis.sqlpg.action.handler.schema-monitoring script: "import datetime\nimport difflib\nimport yaml\nimport flow_api\n\n\ndef\ \ handler(system: flow_api.System, this: flow_api.Execution):\n inputs =\ \ this.get('input_value')\n connector_id = inputs['connector_id']\n notification_recipients\ \ = inputs['notification_recipients']\n connector = system.connector(connector_id,\ \ by='id')\n connector_name, last_analysis_at, analysis_data = connector.get('name',\ \ 'last_analysis_at', 'analysis_data')\n\n try:\n last_schema_str\ \ = system.file(f'analysis.sqlpg.schema.{connector_name}.yaml').get_text_content()\n\ \ last_schema_modified_at = system.file(f'analysis.sqlpg.schema.{connector_name}.yaml').get('modified_at')\n\ \ except flow_api.ResourceNotFoundError:\n last_schema_str = ''\n\ \ last_schema_modified_at = None\n\n output_value = this.flow(\n \ \ 'analysis.sqlpg',\n id=connector_id\n ).get('output_value')\n\ \n current_schema_str = yaml.safe_dump(output_value['analysis_data']['tables'],\ \ default_flow_style=False)\n if last_schema_str == current_schema_str:\n\ \ # no change\n return this.success('all done: no change in schema')\n\ \n # schema has changed\n # rename schema file to \"-old\"\n system.file(f'analysis.sqlpg.schema.{connector_name}-old.yaml').delete()\n\ \ system.file(f'analysis.sqlpg.schema.{connector_name}.yaml').save(name=f'analysis.sqlpg.schema.{connector_name}-old.yaml')\n\ \ # save new schema\n system.file(f'analysis.sqlpg.schema.{connector_name}.yaml').save_text_content(current_schema_str)\n\ \ # calculate diff\n diff_str = '\\n'.join(difflib.unified_diff(\n \ \ last_schema_str.splitlines(),\n current_schema_str.splitlines(),\n\ \ fromfile='last schema',\n tofile='current schema',\n ))\n\ \n self_url = system.get_self_url()\n old_schema_url = f\"{self_url}/file/{system.file(f'analysis.sqlpg.schema.{connector_name}-old.yaml').get('id')}\"\ \n new_schema_url = f\"{self_url}/file/{system.file(f'analysis.sqlpg.schema.{connector_name}.yaml').get('id')}\"\ \n for notification_recipient in notification_recipients:\n system.user(notification_recipient).send_mail(\n\ \ 
subject=f'Schema monitoring notification: {connector_name}',\n\ \ html=(\n f'''\n <p>\n The schema of {connector_name} has changed.\n </p>\n <p>\n Old schema from {last_schema_modified_at}\n </p>\n <p>\n New schema from {output_value['last_analysis_at']}\n </p>\n <p>\n Here's a diff of the changes:\n </p>\n <pre>\n{diff_str}\n </pre>\n <p>\n The previous schema: {old_schema_url}\n </p>\n <p>\n The new schema: {new_schema_url}\n </p>
\n '''\n ),\n )\n \ \ return this.success('sent monitoring notification')" - name: analysis.ssh script: "import datetime\nimport flow_api\n\n\ndef handler(system: flow_api.System,\ \ this: flow_api.Execution):\n inputs = this.get('input_value')\n connector_id\ \ = inputs['id']\n connector = system.connector(connector_id, by='id')\n\ \ connector_name = connector.get('name')\n\n os_infos = this.connect(\n\ \ connector_name,\n name='analysis: os',\n script=(\n \ \ '''\n UNAME=`uname -a`\n OS_RELEASE=`cat /etc/os-release`\n\ \ ISSUE=`cat /etc/issue`\n VERSION=`cat /proc/version`\n\ \ HOSTNAMECTL=`hostnamectl`\n LSB_RELEASE=`lsb_release\ \ -a`\n '''\n ),\n output_vars=[\n 'UNAME',\n\ \ 'OS_RELEASE',\n 'ISSUE',\n 'VERSION',\n \ \ 'HOSTNAMECTL',\n 'LSB_RELEASE',\n ],\n ).get('output_value')['vars']\n\ \n device_infos = this.connect(\n connector_name,\n name='analysis:\ \ devices',\n script=(\n '''\n DISKS=`df -B1`\n\ \ MEMORY=`free -m`\n SWAP=`cat /proc/swaps`\n \ \ UPTIME=`cat /proc/uptime`\n '''\n ),\n output_vars=[\n\ \ 'DISKS',\n 'MEMORY',\n 'SWAP',\n \ \ 'UPTIME',\n ],\n ).get('output_value')['vars']\n\n # parse\ \ disk infos\n disks_infos = []\n for disk_info_line in device_infos.pop('DISKS',\ \ '').splitlines()[1:]:\n filesystem, _, used_bytes, available_bytes,\ \ use_percent, mountpoint = [s for s in disk_info_line.split(' ') if s]\n \ \ disks_infos.append({\n 'filesystem': filesystem,\n \ \ 'used_bytes': int(used_bytes),\n 'available_bytes': int(available_bytes),\n\ \ 'use_percent': int(use_percent[:-1]),\n 'mountpoint':\ \ mountpoint,\n })\n device_infos['disks'] = disks_infos\n\n analysis_data\ \ = {\n 'os_infos': os_infos,\n 'device_infos': device_infos,\n\ \ }\n now = datetime.datetime.now(datetime.timezone.utc)\n connector.save(\n\ \ analysis_data=analysis_data,\n last_analysis_at=now.isoformat(),\n\ \ )\n this.set_output(\n analysis_data=analysis_data,\n \ \ last_analysis_at=now.isoformat(),\n )\n\n return this.success('all done')" git_config_plain_list_project_id: [] icon: data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAyNCAyNCI+PHBhdGggZD0iTTkuNSwzQTYuNSw2LjUgMCAwLDEgMTYsOS41QzE2LDExLjExIDE1LjQxLDEyLjU5IDE0LjQ0LDEzLjczTDE0LjcxLDE0SDE1LjVMMjAuNSwxOUwxOSwyMC41TDE0LDE1LjVWMTQuNzFMMTMuNzMsMTQuNDRDMTIuNTksMTUuNDEgMTEuMTEsMTYgOS41LDE2QTYuNSw2LjUgMCAwLDEgMyw5LjVBNi41LDYuNSAwIDAsMSA5LjUsM005LjUsNUM3LDUgNSw3IDUsOS41QzUsMTIgNywxNCA5LjUsMTRDMTIsMTQgMTQsMTIgMTQsOS41QzE0LDcgMTIsNSA5LjUsNVoiIC8+PC9zdmc+ is_archived: false is_readonly: false modified_at: '2022-10-18T09:26:00.417797+00:00' modified_by_identity: identity_type: GIT_CONFIG name: common-content name: Connection Analysis & Test bundle oauth_plain_list_project_id: [] organization_id_organization: name: cloudomation plugin_plain_list_project_id: - is_enabled: true name: analysis-actions plugin_action_plain_list_plugin_id: - color: null description: Create a schedule to notify Cloudomation users about disk usage flow_id_flow: name: analysis.ssh.action.create-diskspace-monitoring-schedule icon: calendarClock is_enabled: true name: Add diskspace monitoring parent_plugin_action_id_plugin_action: name: Analyze SSH host record_filter: connector_type: SSH resource_type: CONNECTOR type: RECORD - color: null description: Run VACUUM in the PostgreSQL database flow_id_flow: name: analysis.sqlpg.action.vacuum icon: vacuum is_enabled: true name: Run VACUUM parent_plugin_action_id_plugin_action: name: Analyze PostgreSQL database record_filter: connector_type: SQLPG resource_type: CONNECTOR type: 
RECORD - color: null description: Fetch information from the remote SSH server flow_id_flow: name: analysis.ssh icon: magnify is_enabled: true name: Analyze SSH host parent_plugin_action_id_plugin_action: null record_filter: connector_type: SSH resource_type: CONNECTOR type: RECORD - color: null description: Create a schedule to run VACUUM FULL in the PostgreSQL database flow_id_flow: name: analysis.sqlpg.action.create-vacuum-full-schedule icon: calendarClock is_enabled: true name: Schedule VACUUM FULL parent_plugin_action_id_plugin_action: name: Analyze PostgreSQL database record_filter: connector_type: SQLPG resource_type: CONNECTOR type: RECORD - color: null description: Create a schedule to watch for changes in the database schema flow_id_flow: name: analysis.sqlpg.action.create-schema-monitoring-schedule icon: calendarClock is_enabled: true name: Add schema monitoring parent_plugin_action_id_plugin_action: name: Analyze PostgreSQL database record_filter: connector_type: SQLPG resource_type: CONNECTOR type: RECORD - color: null description: Fetch information from the REST endpoint flow_id_flow: name: analysis.rest icon: magnify is_enabled: true name: Analyze REST endpoint parent_plugin_action_id_plugin_action: null record_filter: connector_type: REST resource_type: CONNECTOR type: RECORD - color: null description: Run VACUUM FULL in the PostgreSQL database flow_id_flow: name: analysis.sqlpg.action.vacuum-full icon: vacuum is_enabled: true name: Run VACUUM FULL parent_plugin_action_id_plugin_action: name: Analyze PostgreSQL database record_filter: connector_type: SQLPG resource_type: CONNECTOR type: RECORD - color: null description: Fetch information from the PostgreSQL database flow_id_flow: name: analysis.sqlpg icon: magnify is_enabled: true name: Analyze PostgreSQL database parent_plugin_action_id_plugin_action: null record_filter: connector_type: SQLPG resource_type: CONNECTOR type: RECORD - color: null description: Create a schedule to run VACUUM in the PostgreSQL database flow_id_flow: name: analysis.sqlpg.action.create-vacuum-schedule icon: calendarClock is_enabled: true name: Schedule VACUUM parent_plugin_action_id_plugin_action: name: Analyze PostgreSQL database record_filter: connector_type: SQLPG resource_type: CONNECTOR type: RECORD project_id_project: name: Connection Analysis & Test bundle record_tag_plain_list_record_id: [] role_plain_list_project_id: [] schedule_plain_list_project_id: [] scheduler_plain_list_project_id: [] schema_plain_list_project_id: [] setting_plain_list_project_id: [] sync_config_plain_list_project_id: [] tag_plain_list_project_id: [] vault_config_plain_list_project_id: [] version: '1' webhook_plain_list_project_id: [] workspace_id_workspace: name: develop wrapper_plain_list_project_id: []