diff --git a/.gitignore b/.gitignore
index 8eab2b90e..301054d7e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -66,6 +66,10 @@ instance/
 # Sphinx documentation
 docs/_build/
 .pickOptions.sh
+tmp/
+# templates/events.yaml
+*-series-log-messages.rst
+*-series-alarm-messages.rst
 
 # API Reference Guide
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 3d57d6955..307d4d3f7 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,6 +1,7 @@
 sphinx==4.2.0
 openstackdocstheme>=2.2.1,<=2.3.1 # Apache-2.0
 docutils==0.17.1
+PyYAML==6.0
 
 # API Reference Guide
 os-api-ref>=1.5.0 # Apache-2.0
diff --git a/doc/source/dist_cloud/kubernetes/configuring-kubernetes-update-orchestration-on-distributed-cloud.rst b/doc/source/dist_cloud/kubernetes/configuring-kubernetes-update-orchestration-on-distributed-cloud.rst
index 123b33fa8..f66b08b87 100644
--- a/doc/source/dist_cloud/kubernetes/configuring-kubernetes-update-orchestration-on-distributed-cloud.rst
+++ b/doc/source/dist_cloud/kubernetes/configuring-kubernetes-update-orchestration-on-distributed-cloud.rst
@@ -17,7 +17,7 @@ If you want to use the Horizon Web interface, see
 Management-affecting alarms cannot be ignored using relaxed alarm rules
 during an orchestrated Kubernetes version upgrade operation. For a list of
 management-affecting alarms, see |fault-doc|:
-:ref:`Alarm Messages <100-series-alarm-messages>`. To display
+:ref:`Alarm Messages <100-series-alarm-messages-starlingx>`. To display
 management-affecting active alarms, use the following command:
 
 .. code-block:: none
diff --git a/doc/source/fault-mgmt/kubernetes/100-series-alarm-messages.rst b/doc/source/fault-mgmt/kubernetes/100-series-alarm-messages.rst
deleted file mode 100644
index 7b881e4b6..000000000
--- a/doc/source/fault-mgmt/kubernetes/100-series-alarm-messages.rst
+++ /dev/null
@@ -1,368 +0,0 @@
-
-.. jsy1579701868527
-.. _100-series-alarm-messages:
-
-=========================
-100 Series Alarm Messages
-=========================
-
-The system inventory and maintenance service reports system changes with
-different degrees of severity. Use the reported alarms to monitor the overall
-health of the system.
-
-.. include:: /_includes/x00-series-alarm-messages.rest
-
-.. _100-series-alarm-messages-table-zrd-tg5-v5:
-
-.. list-table::
-   :widths: 6 25
-   :header-rows: 0
-
-   * - **Alarm ID: 100.101**
-     - Platform CPU threshold exceeded; threshold x%, actual y%.
-       CRITICAL @ 95%
-       MAJOR @ 90%
-   * - Entity Instance
-     - host=
-   * - Degrade Affecting Severity:
-     - Critical
-   * - Severity:
-     - C/M\*
-   * - Proposed Repair Action
-     - Monitor and if condition persists, contact next level of support.
-
------
-
-.. list-table::
-   :widths: 6 25
-   :header-rows: 0
-
-   * - **Alarm ID: 100.103**
-     - Memory threshold exceeded; threshold x%, actual y% .
-
-       CRITICAL @ 90%
-
-       MAJOR @ 80%
-   * - Entity Instance
-     - host=
-   * - Degrade Affecting Severity:
-     - Critical
-   * - Severity:
-     - C/M
-   * - Proposed Repair Action
-     - Monitor and if condition persists, contact next level of support; may
-       require additional memory on Host.
-
------
-
-.. list-table::
-   :widths: 6 25
-   :header-rows: 0
-
-   * - **Alarm ID: 100.104**
-     - host=.filesystem=
-       File System threshold exceeded; threshold x%, actual y%.
-
-       CRITICAL @ 90%
-
-       MAJOR @ 80%
-
-       OR
-
-       host=.volumegroup=
-       Monitor and if condition persists, consider adding additional
-       physical volumes to the volume group.
- * - Entity Instance - - host=.filesystem= - - OR - - host=.volumegroup= - * - Degrade Affecting Severity: - - Critical - * - Severity: - - C\*/M - * - Proposed Repair Action - - Reduce usage or resize filesystem. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.105** - - filesystem is not added on both controllers and/or does not - have the same size: . - * - Entity Instance - - fs\_name= - * - Degrade Affecting Severity: - - None - * - Severity: - - C/M\* - * - Proposed Repair Action - - Add image-conversion filesystem on both controllers. - - Consult the System Administration Manual for more details. - - If problem persists, contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.106** - - 'OAM' Port failed. - * - Entity Instance - - host=.port= - * - Degrade Affecting Severity: - - Major - * - Severity: - - M\* - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.107** - - 'OAM' Interface degraded. - - or - - 'OAM' Interface failed. - * - Entity Instance - - host=.interface= - * - Degrade Affecting Severity: - - Major - * - Severity: - - C or M\* - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.108** - - 'MGMT' Port failed. - * - Entity Instance - - host=.port= - * - Degrade Affecting Severity: - - Major - * - Severity: - - M\* - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.109** - - 'MGMT' Interface degraded. - - or - - 'MGMT' Interface failed. - * - Entity Instance - - host=.interface= - * - Degrade Affecting Severity: - - Major - * - Severity: - - C or M\* - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.110** - - 'CLUSTER-HOST' Port failed. - * - Entity Instance - - host=.port= - * - Degrade Affecting Severity: - - Major - * - Severity: - - C or M\* - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.111** - - 'CLUSTER-HOST' Interface degraded. - - OR - - 'CLUSTER-HOST' Interface failed. - * - Entity Instance - - host=.interface= - * - Degrade Affecting Severity: - - Major - * - Severity: - - C or M\* - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.112** - - 'DATA-VRS' Port down. - * - Entity Instance - - host=.port= - * - Degrade Affecting Severity: - - Major - * - Severity: - - M - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.113** - - 'DATA-VRS' Interface degraded. - - or - - 'DATA-VRS' Interface down. 
- * - Entity Instance - - host=.interface= - * - Degrade Affecting Severity: - - Major - * - Severity: - - C or M\* - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.114** - - NTP configuration does not contain any valid or reachable NTP servers. - The alarm is raised regardless of NTP enabled/disabled status. - - NTP address is not a valid or a reachable NTP server. - - Connectivity to external PTP Clock Synchronization is lost. - * - Entity Instance - - host=.ntp - - host=.ntp= - * - Degrade Affecting Severity: - - None - * - Severity: - - M or m - * - Proposed Repair Action - - Monitor and if condition persists, contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.118** - - Controller cannot establish connection with remote logging server. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - m - * - Proposed Repair Action - - Ensure Remote Log Server IP is reachable from Controller through OAM - interface; otherwise contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 100.119** - - does not support the provisioned PTP mode - - OR - - PTP clocking is out-of-tolerance - - OR - - is not locked to remote PTP Grand Master (|PTS|) - - OR - - GNSS signal loss state: - - OR - - 1PPS signal loss state: - * - Entity Instance - - host=.ptp OR host=.ptp=no-lock - - OR - - host=.ptp=.unsupported=hardware-timestamping - - OR - - host=.ptp=.unsupported=software-timestamping - - OR - - host=.ptp=.unsupported=legacy-timestamping - - OR - - host=.ptp=out-of-tolerance - - OR - - host=.instance=.ptp=out-of-tolerance - - OR - - host=.interface=.ptp=signal-loss - * - Degrade Affecting Severity: - - None - * - Severity: - - M or m - * - Proposed Repair Action - - Monitor and, if condition persists, contact next level of support. diff --git a/doc/source/fault-mgmt/kubernetes/200-series-alarm-messages.rst b/doc/source/fault-mgmt/kubernetes/200-series-alarm-messages.rst deleted file mode 100644 index b4ca18348..000000000 --- a/doc/source/fault-mgmt/kubernetes/200-series-alarm-messages.rst +++ /dev/null @@ -1,389 +0,0 @@ - -.. uof1579701912856 -.. _200-series-alarm-messages: - -========================= -200 Series Alarm Messages -========================= - -The system inventory and maintenance service reports system changes with -different degrees of severity. Use the reported alarms to monitor the overall -health of the system. - -.. include:: /_includes/x00-series-alarm-messages.rest - -.. _200-series-alarm-messages-table-zrd-tg5-v5: - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.001** - - was administratively locked to take it out-of-service. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - W\* - * - Proposed Repair Action - - Administratively unlock Host to bring it back in-service. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.004** - - experienced a service-affecting failure. - - Host is being auto recovered by Reboot. 
- * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - C\* - * - Proposed Repair Action - - If auto-recovery is consistently unable to recover host to the - unlocked-enabled state contact next level of support or lock and replace - failing host. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.005** - - Degrade: - - is experiencing an intermittent 'Management Network' - communication failures that have exceeded its lower alarming threshold. - - Failure: - - is experiencing a persistent Critical 'Management Network' - communication failure. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* (Degrade) or C\* (Failure) - * - Proposed Repair Action - - Check 'Management Network' connectivity and support for multicast - messaging. If problem consistently occurs after that and Host is reset, - then contact next level of support or lock and replace failing host. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.006** - - Main Process Monitor Daemon Failure \(Major\) - - 'Process Monitor' \(pmond\) process is not running or - functioning properly. The system is trying to recover this process. - - Monitored Process Failure \(Critical/Major/Minor\) - - Critical: Critical '' process has failed and - could not be auto-recovered gracefully. Auto-recovery progression by - host reboot is required and in progress. - - Major: is degraded due to the failure of its '' - process. Auto recovery of this Major process is in progress. - - Minor: - - '' process has failed. Auto recovery of this - Minor process is in progress. - - '' process has failed. Manual recovery is required. - - tp4l/phc2sys process failure. Manual recovery is required. - * - Entity Instance - - host=.process= - * - Degrade Affecting Severity: - - Major - * - Severity: - - C/M/m\* - * - Proposed Repair Action - - If this alarm does not automatically clear after some time and continues - to be asserted after Host is locked and unlocked then contact next level - of support for root cause analysis and recovery. - - If problem consistently occurs after Host is locked and unlocked then - contact next level of support for root cause analysis and recovery. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.007** - - Critical: \(with host degrade\): - - Host is degraded due to a 'Critical' out-of-tolerance reading from the - '' sensor - - Major: \(with host degrade\) - - Host is degraded due to a 'Major' out-of-tolerance reading from the - '' sensor - - Minor: - - Host is reporting a 'Minor' out-of-tolerance reading from the - '' sensor - * - Entity Instance - - host=.sensor= - * - Degrade Affecting Severity: - - Critical - * - Severity: - - C/M/m - * - Proposed Repair Action - - If problem consistently occurs after Host is power cycled and or reset, - contact next level of support or lock and replace failing host. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.009** - - Degrade: - - is experiencing an intermittent 'Cluster-host Network' - communication failures that have exceeded its lower alarming threshold. - - Failure: - - is experiencing a persistent Critical 'Cluster-host Network' - communication failure. 
- * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* (Degrade) or C\* (Critical) - * - Proposed Repair Action - - Check 'Cluster-host Network' connectivity and support for multicast - messaging. If problem consistently occurs after that and Host is reset, - then contact next level of support or lock and replace failing host. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.010** - - access to board management module has failed. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - W - * - Proposed Repair Action - - Check Host's board management configuration and connectivity. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.011** - - experienced a configuration failure during initialization. - Host is being re-configured by Reboot. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - C\* - * - Proposed Repair Action - - If auto-recovery is consistently unable to recover host to the - unlocked-enabled state contact next level of support or lock and - replace failing host. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.013** - - compute service of the only available controller is not - operational. Auto-recovery is disabled. Degrading host instead. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - Major - * - Severity: - - M\* - * - Proposed Repair Action - - Enable second controller and Switch Activity \(Swact\) over to it as - soon as possible. Then Lock and Unlock host to recover its local compute - service. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.014** - - The Hardware Monitor was unable to load, configure and monitor one - or more hardware sensors. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - m - * - Proposed Repair Action - - Check Board Management Controller provisioning. Try reprovisioning the - BMC. If problem persists try power cycling the host and then the entire - server including the BMC power. If problem persists then contact next - level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 200.015** - - Unable to read one or more sensor groups from this host's board - management controller. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - M - * - Proposed Repair Action - - Check board management connectivity and try rebooting the board - management controller. If problem persists contact next level of - support or lock and replace failing host. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 210.001** - - System Backup in progress. - * - Entity Instance - - host=controller - * - Degrade Affecting Severity: - - None - * - Severity: - - m\* - * - Proposed Repair Action - - No action required. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 250.001** - - Configuration is out-of-date. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Administratively lock and unlock to update config. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 250.003** - - Kubernetes certificates rotation failed on host [, reason = - ]. 
- * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - M/w - * - Proposed Repair Action - - Lock and unlock the host to update services with new certificates - (Manually renew kubernetes certificates first if renewal failed). - ------ - -.. only:: partner - - .. include:: /_includes/200-series-alarm-messages.rest - - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 270.001** - - Host compute services failure\[, reason = \] - * - Entity Instance - - host=.services=compute - * - Degrade Affecting Severity: - - None - * - Severity: - - C\* - * - Proposed Repair Action - - Wait for host services recovery to complete; if problem persists contact - next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 280.001** - - is offline. - * - Entity Instance - - subcloud= - * - Degrade Affecting Severity: - - None - * - Severity: - - C\* - * - Proposed Repair Action - - Wait for subcloud to become online; if problem persists contact next - level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 280.002** - - sync status is out-of-sync. - * - Entity Instance - - \[subcloud=.resource= \| \| - \| \] - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - If problem persists contact next level of support. \ No newline at end of file diff --git a/doc/source/fault-mgmt/kubernetes/200-series-maintenance-customer-log-messages.rst b/doc/source/fault-mgmt/kubernetes/200-series-maintenance-customer-log-messages.rst deleted file mode 100644 index c0e3dfee4..000000000 --- a/doc/source/fault-mgmt/kubernetes/200-series-maintenance-customer-log-messages.rst +++ /dev/null @@ -1,120 +0,0 @@ - -.. lzz1579291773073 -.. _200-series-maintenance-customer-log-messages: - -============================================ -200 Series Maintenance Customer Log Messages -============================================ - -The Customer Logs include events that do not require immediate user action. - -The following types of events are included in the Customer Logs. The severity -of the events is represented in the table by one or more letters, as follows: - -.. _200-series-maintenance-customer-log-messages-ul-jsd-jkg-vp: - -- C: Critical - -- M: Major - -- m: Minor - -- W: Warning - -- NA: Not applicable - -.. _200-series-maintenance-customer-log-messages-table-zgf-jvw-v5: - - -.. table:: Table 1. 
Customer Log Messages - :widths: auto - - +-----------------+------------------------------------------------------------------+----------+ - | Log ID | Description | Severity | - + +------------------------------------------------------------------+----------+ - | | Entity Instance ID | | - +=================+==================================================================+==========+ - | 200.020 | has been 'discovered' on the network | NA | - | | | | - | | host=.event=discovered | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.020 | has been 'added' to the system | NA | - | | | | - | | host=.event=add | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.020 | has 'entered' multi-node failure avoidance | NA | - | | | | - | | host=.event=mnfa\_enter | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.020 | has 'exited' multi-node failure avoidance | NA | - | | | | - | | host=.event=mnfa\_exit | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | board management controller has been 'provisioned' | NA | - | | | | - | | host=.command=provision | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | board management controller has been 're-provisioned' | NA | - | | | | - | | host=.command=reprovision | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | board management controller has been 'de-provisioned' | NA | - | | | | - | | host=.command=deprovision | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | manual 'unlock' request | NA | - | | | | - | | host=.command=unlock | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | manual 'reboot' request | NA | - | | | | - | | host=.command=reboot | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | manual 'reset' request | NA | - | | | | - | | host=.command=reset | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | manual 'power-off' request | NA | - | | | | - | | host=.command=power-off | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | manual 'power-on' request | NA | - | | | | - | | host=.command=power-on | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | manual 'reinstall' request | NA | - | | | | - | | host=.command=reinstall | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | manual 'force-lock' request | NA | - | | | | - | | host=.command=force-lock | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | manual 'delete' request | NA | - | | | | - | | host=.command=delete | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.021 | manual 'controller switchover' request | NA | - | | | | - | | host=.command=swact | | - 
+-----------------+------------------------------------------------------------------+----------+ - | 200.022 | is now 'disabled' | NA | - | | | | - | | host=.state=disabled | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.022 | is now 'enabled' | NA | - | | | | - | | host=.state=enabled | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.022 | is now 'online' | NA | - | | | | - | | host=.status=online | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.022 | is now 'offline' | NA | - | | | | - | | host=.status=offline | | - +-----------------+------------------------------------------------------------------+----------+ - | 200.022 | is 'disabled-failed' to the system | NA | - | | | | - | | host=.status=failed | | - +-----------------+------------------------------------------------------------------+----------+ \ No newline at end of file diff --git a/doc/source/fault-mgmt/kubernetes/300-series-alarm-messages.rst b/doc/source/fault-mgmt/kubernetes/300-series-alarm-messages.rst deleted file mode 100644 index cf4326917..000000000 --- a/doc/source/fault-mgmt/kubernetes/300-series-alarm-messages.rst +++ /dev/null @@ -1,53 +0,0 @@ - -.. zwe1579701930425 -.. _300-series-alarm-messages: - -========================= -300 Series Alarm Messages -========================= - -The system inventory and maintenance service reports system changes with -different degrees of severity. Use the reported alarms to monitor the -overall health of the system. - -.. include:: /_includes/x00-series-alarm-messages.rest - -.. _300-series-alarm-messages-table-zrd-tg5-v5: - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 300.001** - - 'Data' Port failed. - * - Entity Instance - - host=.port= - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 300.002** - - 'Data' Interface degraded. - - or - - 'Data' Interface failed. - * - Entity Instance - - host=.interface= - * - Degrade Affecting Severity: - - Critical - * - Severity: - - C/M\* - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. \ No newline at end of file diff --git a/doc/source/fault-mgmt/kubernetes/400-series-alarm-messages.rst b/doc/source/fault-mgmt/kubernetes/400-series-alarm-messages.rst deleted file mode 100644 index c35191bb9..000000000 --- a/doc/source/fault-mgmt/kubernetes/400-series-alarm-messages.rst +++ /dev/null @@ -1,69 +0,0 @@ - -.. ots1579702138430 -.. _400-series-alarm-messages: - -========================= -400 Series Alarm Messages -========================= - -The system inventory and maintenance service reports system changes with -different degrees of severity. Use the reported alarms to monitor the overall -health of the system. - -.. include:: /_includes/x00-series-alarm-messages.rest - -.. _400-series-alarm-messages-table-zrd-tg5-v5: - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 400.003** - - License key is not installed; a valid license key is required for - operation. - - or - - License key has expired or is invalid; a valid license key is required - for operation. 
- - or - - Evaluation license key will expire on ; there are days - remaining in this evaluation. - - or - - Evaluation license key will expire on ; there is only 1 day - remaining in this evaluation. - * - Entity Instance: - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - C\* - * - Proposed Repair Action - - Contact next level of support to obtain a new license key. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 400.005** - - Communication failure detected with peer over port . - - or - - Communication failure detected with peer over port - within the last 30 seconds. - * - Entity Instance: - - host=.network= - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Check cabling and far-end port configuration and status on adjacent - equipment. \ No newline at end of file diff --git a/doc/source/fault-mgmt/kubernetes/400-series-customer-log-messages.rst b/doc/source/fault-mgmt/kubernetes/400-series-customer-log-messages.rst deleted file mode 100644 index 3af6539d8..000000000 --- a/doc/source/fault-mgmt/kubernetes/400-series-customer-log-messages.rst +++ /dev/null @@ -1,81 +0,0 @@ - -.. pgb1579292662158 -.. _400-series-customer-log-messages: - -================================ -400 Series Customer Log Messages -================================ - -The Customer Logs include events that do not require immediate user action. - -The following types of events are included in the Customer Logs. The severity -of the events is represented in the table by one or more letters, as follows: - -.. _400-series-customer-log-messages-ul-jsd-jkg-vp: - -- C: Critical - -- M: Major - -- m: Minor - -- W: Warning - -- NA: Not applicable - -.. _400-series-customer-log-messages-table-zgf-jvw-v5: - -.. list-table:: - :widths: 6 15 - :header-rows: 0 - - * - **Alarm ID: 400.003** - - License key has expired or is invalid - - or - - Evaluation license key will expire on - - or - - License key is valid - * - Entity Instance - - host= - * - Severity: - - C - ------ - -.. list-table:: - :widths: 6 15 - :header-rows: 0 - - * - **Alarm ID: 400.005** - - Communication failure detected with peer over port on host - - - or - - Communication failure detected with peer over port on host - within the last seconds - - or - - Communication established with peer over port on host - * - Entity Instance - - host=.network= - * - Severity: - - C - ------ - -.. list-table:: - :widths: 6 15 - :header-rows: 0 - - * - **Alarm ID: 400.007** - - Swact or swact-force - * - Entity Instance - - host= - * - Severity: - - C \ No newline at end of file diff --git a/doc/source/fault-mgmt/kubernetes/500-series-alarm-messages.rst b/doc/source/fault-mgmt/kubernetes/500-series-alarm-messages.rst deleted file mode 100644 index e98c8d366..000000000 --- a/doc/source/fault-mgmt/kubernetes/500-series-alarm-messages.rst +++ /dev/null @@ -1,91 +0,0 @@ - -.. xpx1579702157578 -.. _500-series-alarm-messages: - -========================= -500 Series Alarm Messages -========================= - -The system inventory and maintenance service reports system changes with -different degrees of severity. Use the reported alarms to monitor the overall -health of the system. - -.. include:: /_includes/x00-series-alarm-messages.rest - -.. _500-series-alarm-messages-table-zrd-tg5-v5: - - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 500.101** - - Developer patch certificate enabled. 
- * - Entity Instance - - host=controller - * - Degrade Affecting Severity: - - None - * - Severity: - - C - * - Proposed Repair Action - - Reinstall system to disable developer certificate and remove untrusted - patches. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 500.200** - - Certificate ‘system certificate-show ' (mode=) expiring soon on . - OR - Certificate ‘/’ expiring soon on . - OR - Certificate ‘’ expiring soon on . - system.certificate.k8sRootCA - * - Entity Instance - - system.certificate.mode=.uuid= - OR - namespace=.certificate= - OR - namespace=.secret= - * - Degrade Affecting Severity: - - None - * - Severity: - - M - * - Proposed Repair Action - - Renew certificate for the entity identified. - * - Management_Affecting_Severity: - - none - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 500.210** - - Certificate ‘system certificate-show ' (mode=) expired. - OR - Certificate ‘/’ expired. - OR - Certificate ‘’ expired. - * - Entity Instance - - system.certificate.mode=.uuid= - OR - namespace=.certificate= - OR - namespace=.secret= - OR - system.certificate.k8sRootCA - * - Degrade Affecting Severity: - - None - * - Severity: - - C - * - Proposed Repair Action - - Renew certificate for the entity identified. - * - Management_Affecting_Severity: - - none diff --git a/doc/source/fault-mgmt/kubernetes/750-series-alarm-messages.rst b/doc/source/fault-mgmt/kubernetes/750-series-alarm-messages.rst deleted file mode 100644 index ec6d3c228..000000000 --- a/doc/source/fault-mgmt/kubernetes/750-series-alarm-messages.rst +++ /dev/null @@ -1,118 +0,0 @@ - -.. cta1579702173704 -.. _750-series-alarm-messages: - -========================= -750 Series Alarm Messages -========================= - -The system inventory and maintenance service reports system changes with -different degrees of severity. Use the reported alarms to monitor the overall -health of the system. - -.. include:: /_includes/x00-series-alarm-messages.rest - -.. _750-series-alarm-messages-table-zrd-tg5-v5: - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 750.001** - - Application upload failure. - * - Entity Instance - - k8s\_application= - * - Degrade Affecting Severity: - - None - * - Severity: - - W - * - Proposed Repair Action - - Check the system inventory log for the cause. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 750.002** - - Application apply failure. - * - Entity Instance - - k8s\_application= - * - Degrade Affecting Severity: - - None - * - Severity: - - M - * - Proposed Repair Action - - Retry applying the application. If the issue persists, please check the - system inventory log for cause. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 750.003** - - Application remove failure. - * - Entity Instance - - k8s\_application= - * - Degrade Affecting Severity: - - None - * - Severity: - - M - * - Proposed Repair Action - - Retry removing the application. If the issue persists, please the check - system inventory log for cause. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 750.004** - - Application apply in progress. - * - Entity Instance - - k8s\_application= - * - Degrade Affecting Severity: - - None - * - Severity: - - W - * - Proposed Repair Action - - No action is required. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 750.005** - - Application update in progress. 
- * - Entity Instance - - k8s\_application= - * - Degrade Affecting Severity: - - None - * - Severity: - - W - * - Proposed Repair Action - - No action is required. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 750.006** - - Automatic application re-apply is pending. - * - Entity Instance - - k8s\_application= - * - Degrade Affecting Severity: - - None - * - Severity: - - W - * - Proposed Repair Action - - Ensure all hosts are either locked or unlocked. When the system is - stable the application will automatically be reapplied. \ No newline at end of file diff --git a/doc/source/fault-mgmt/kubernetes/800-series-alarm-messages.rst b/doc/source/fault-mgmt/kubernetes/800-series-alarm-messages.rst deleted file mode 100644 index d0bd7e264..000000000 --- a/doc/source/fault-mgmt/kubernetes/800-series-alarm-messages.rst +++ /dev/null @@ -1,152 +0,0 @@ - -.. rww1579702317136 -.. _800-series-alarm-messages: - -========================= -800 Series Alarm Messages -========================= - -The system inventory and maintenance service reports system changes with -different degrees of severity. Use the reported alarms to monitor the overall -health of the system. - -.. include:: /_includes/x00-series-alarm-messages.rest - -.. _800-series-alarm-messages-table-zrd-tg5-v5: - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 800.001** - - Storage Alarm Condition: - - 1 mons down, quorum 1,2 controller-1,storage-0 - * - Entity Instance - - cluster= - * - Degrade Affecting Severity: - - None - * - Severity: - - C/M\* - * - Proposed Repair Action - - If problem persists, contact next level of support. - -.. ----- -.. -.. .. list-table:: -.. :widths: 6 25 -.. :header-rows: 0 -.. -.. * - **Alarm ID: 800.003** -.. - Storage Alarm Condition: Quota/Space mismatch for the tier. -.. The sum of Ceph pool quotas does not match the tier size. -.. * - Entity Instance -.. - cluster=.tier= -.. * - Degrade Affecting Severity: -.. - None -.. * - Severity: -.. - m -.. * - Proposed Repair Action -.. - Update ceph storage pool quotas to use all available tier space. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 800.010** - - Potential data loss. No available OSDs in storage replication group. - * - Entity Instance - - cluster=.peergroup= - * - Degrade Affecting Severity: - - None - * - Severity: - - C\* - * - Proposed Repair Action - - Ensure storage hosts from replication group are unlocked and available. - Check if OSDs of each storage host are up and running. If problem - persists contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 800.011** - - Loss of replication in peergroup. - * - Entity Instance - - cluster=.peergroup= - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Ensure storage hosts from replication group are unlocked and available. - Check if OSDs of each storage host are up and running. If problem - persists contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 800.102** - - Storage Alarm Condition: - - PV configuration on . - Reason: . - * - Entity Instance - - pv= - * - Degrade Affecting Severity: - - None - * - Severity: - - C/M\* - * - Proposed Repair Action - - Remove failed PV and associated Storage Device then recreate them. - ------ - -.. 
list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 800.103** - - Storage Alarm Condition: - - \[ Metadata usage for LVM thin pool / exceeded - threshold and automatic extension failed. - - Metadata usage for LVM thin pool / exceeded - threshold \]; threshold x%, actual y%. - * - Entity Instance - - .lvmthinpool=/ - * - Degrade Affecting Severity: - - None - * - Severity: - - C\* - * - Proposed Repair Action - - Increase Storage Space Allotment for Cinder on the 'lvm' backend. - Consult the user documentation for more details. If problem persists, - contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 800.104** - - Storage Alarm Condition: - - configuration failed to apply on host: . - * - Degrade Affecting Severity: - - None - * - Severity: - - C\* - * - Proposed Repair Action - - Update backend setting to reapply configuration. Consult the user - documentation for more details. If problem persists, contact next level - of support. \ No newline at end of file diff --git a/doc/source/fault-mgmt/kubernetes/900-series-alarm-messages.rst b/doc/source/fault-mgmt/kubernetes/900-series-alarm-messages.rst deleted file mode 100644 index 2217af0f6..000000000 --- a/doc/source/fault-mgmt/kubernetes/900-series-alarm-messages.rst +++ /dev/null @@ -1,356 +0,0 @@ - -.. pti1579702342696 -.. _900-series-alarm-messages: - -========================= -900 Series Alarm Messages -========================= - -The system inventory and maintenance service reports system changes with -different degrees of severity. Use the reported alarms to monitor the overall -health of the system. - -.. include:: /_includes/x00-series-alarm-messages.rest - -.. _900-series-alarm-messages-table-zrd-tg5-v5: - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.001** - - Patching operation in progress. - * - Entity Instance - - host=controller - * - Degrade Affecting Severity: - - None - * - Severity: - - m\* - * - Proposed Repair Action - - Complete reboots of affected hosts. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.002** - - Patch host install failure. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Undo patching operation. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.003** - - Obsolete patch in system. - * - Entity Instance - - host=controller - * - Degrade Affecting Severity: - - None - * - Severity: - - W\* - * - Proposed Repair Action - - Remove and delete obsolete patches. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.004** - - Host version mismatch. - * - Entity Instance - - host= - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Reinstall host to update applied load. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.005** - - System Upgrade in progress. - * - Entity Instance - - host=controller - * - Degrade Affecting Severity: - - None - * - Severity: - - m\* - * - Proposed Repair Action - - No action required. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.008** - - Kubernetes rootca update in progress. - * - Entity Instance - - host=controller - * - Degrade Affecting Severity: - - None - * - Severity: - - m - * - Proposed Repair Action - - Wait for kubernetes rootca procedure to complete. 
- ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.009** - - Kubernetes root CA update aborted, certificates may not be fully updated. - * - Entity Instance - - host=controller - * - Degrade Affecting Severity: - - None - * - Severity: - - m - * - Management Affecting Severity: - - w - * - Proposed Repair Action - - Fully update certificates by a new root CA update. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.101** - - Software patch auto-apply in progress. - * - Entity Instance - - orchestration=sw-patch - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Wait for software patch auto-apply to complete; if problem persists - contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.102** - - Software patch auto-apply aborting. - * - Entity Instance - - orchestration=sw-patch - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Wait for software patch auto-apply abort to complete; if problem - persists contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.103** - - Software patch auto-apply failed. - * - Entity Instance - - orchestration=sw-patch - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Attempt to apply software patches manually; if problem persists contact - next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.201** - - Software upgrade auto-apply in progress. - * - Entity Instance - - orchestration=sw-upgrade - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Wait for software upgrade auto-apply to complete; if problem persists - contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.202** - - Software upgrade auto-apply aborting. - * - Entity Instance - - orchestration=sw-upgrade - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Wait for software upgrade auto-apply abort to complete; if problem - persists contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.203** - - Software update auto-apply failed. - * - Entity Instance - - orchestration=sw-upgrade - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Attempt to apply software upgrade manually; if problem persists contact - next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.301** - - Firmware Update auto-apply in progress. - * - Entity Instance - - orchestration=fw-upgrade - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Wait for firmware update auto-apply to complete; if problem persists - contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.302** - - Firmware Update auto-apply aborting. - * - Entity Instance - - orchestration=fw-upgrade - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Wait for firmware update auto-apply abort to complete; if problem - persists contact next level of support. - ------ - -.. 
list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.303** - - Firmware Update auto-apply failed. - * - Entity Instance - - orchestration=fw-upgrade - * - Degrade Affecting Severity: - - None - * - Severity: - - M\* - * - Proposed Repair Action - - Attempt to apply firmware update manually; if problem persists - contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.501** - - Kubernetes rootca update auto-apply in progress - * - Entity Instance - - orchestration=kube-rootca-update - * - Degrade Affecting Severity: - - None - * - Severity: - - M - * - Management Affecting Severity: - - w - * - Proposed Repair Action - - Wait for kubernetes rootca update auto-apply to complete; if problem - persists contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.502** - - Kubernetes rootca update auto-apply aborting. - * - Entity Instance - - orchestration=kube-rootca-update - * - Degrade Affecting Severity: - - None - * - Severity: - - M - * - Management Affecting Severity: - - w - * - Proposed Repair Action - - Wait for kubernetes rootca update auto-apply abort to complete; if - problem persists contact next level of support. - ------ - -.. list-table:: - :widths: 6 25 - :header-rows: 0 - - * - **Alarm ID: 900.503** - - Kubernetes rootca update auto-apply failed. - * - Entity Instance - - orchestration=kube-rootca-update - * - Degrade Affecting Severity: - - None - * - Severity: - - C - * - Management Affecting Severity: - - w - * - Proposed Repair Action - - Attempt to apply kubernetes rootca update manually; if problem persists - contact next level of support. \ No newline at end of file diff --git a/doc/source/fault-mgmt/kubernetes/900-series-orchestration-customer-log-messages.rst b/doc/source/fault-mgmt/kubernetes/900-series-orchestration-customer-log-messages.rst deleted file mode 100644 index bbc454e68..000000000 --- a/doc/source/fault-mgmt/kubernetes/900-series-orchestration-customer-log-messages.rst +++ /dev/null @@ -1,211 +0,0 @@ - -.. bdq1579700719122 -.. _900-series-orchestration-customer-log-messages: - -============================================== -900 Series Orchestration Customer Log Messages -============================================== - -The Customer Logs include events that do not require immediate user action. - -The following types of events are included in the Customer Logs. The severity -of the events is represented in the table by one or more letters, as follows: - -.. _900-series-orchestration-customer-log-messages-ul-jsd-jkg-vp: - -- C: Critical - -- M: Major - -- m: Minor - -- W: Warning - -- NA: Not applicable - -.. _900-series-orchestration-customer-log-messages-table-zgf-jvw-v5: - -.. table:: Table 1. 
Customer Log Messages - :widths: auto - - +-------------------+-------------------------------------------------------+----------+ - | Log ID | Description | Severity | - + +-------------------------------------------------------+----------+ - | | Entity Instance ID | | - +===================+============================================+==========+==========+ - | 900.111 | Software update auto-apply start | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.112 | Software update auto-apply in progress | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.113 | Software update auto-apply rejected | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.114 | Software update auto-apply canceled | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.115 | Software update auto-apply failed | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.116 | Software update auto-apply completed | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.117 | Software update auto-apply abort | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.118 | Software update auto-apply aborting | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.119 | Software update auto-apply abort rejected | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.120 | Software update auto-apply abort failed | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.121 | Software update auto-apply aborted | C | - | | | | - | | orchestration=sw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.211 | Software upgrade auto-apply start | C | - | | | | - | | orchestration=sw-upgrade | | - +-------------------+-------------------------------------------------------+----------+ - | 900.212 | Software upgrade auto-apply in progress | C | - | | | | - | | orchestration=sw-upgrade | | - +-------------------+-------------------------------------------------------+----------+ - | 900.213 | Software upgrade auto-apply rejected | C | - | | | | - | | orchestration=sw-upgrade | | - +-------------------+-------------------------------------------------------+----------+ - | 900.214 | Software upgrade auto-apply canceled | C | - | | | | - | | orchestration=sw-upgrade | | - +-------------------+-------------------------------------------------------+----------+ - | 900.215 | Software upgrade auto-apply failed | C | - | | | | - | | orchestration=sw-upgrade | | - +-------------------+-------------------------------------------------------+----------+ - | 900.216 | Software upgrade auto-apply completed | C | - | | | | - | | orchestration=sw-upgrade | | - 
+-------------------+-------------------------------------------------------+----------+ - | 900.217 | Software upgrade auto-apply abort | C | - | | | | - | | orchestration=sw-upgrade | | - +-------------------+-------------------------------------------------------+----------+ - | 900.218 | Software upgrade auto-apply aborting | C | - | | | | - | | orchestration=sw-upgrade | | - +-------------------+-------------------------------------------------------+----------+ - | 900.219 | Software upgrade auto-apply abort rejected | C | - | | | | - | | orchestration=sw-upgrade | | - +-------------------+-------------------------------------------------------+----------+ - | 900.220 | Software upgrade auto-apply abort failed | C | - | | | | - | | orchestration=sw-upgrade | | - +-------------------+-------------------------------------------------------+----------+ - | 900.221 | Software upgrade auto-apply aborted | C | - | | | | - | | orchestration=sw-upgrade | | - +-------------------+-------------------------------------------------------+----------+ - | 900.311 | Firmware update auto-apply | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.312 | Firmware update auto-apply in progress | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.313 | Firmware update auto-apply rejected | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.314 | Firmware update auto-apply canceled | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.315 | Firmware update auto-apply failed | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.316 | Firmware update auto-apply completed | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.317 | Firmware update auto-apply aborted | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.318 | Firmware update auto-apply aborting | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.319 | Firmware update auto-apply abort rejected | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.320 | Firmware update auto-apply abort failed | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.321 | Firmware update auto-apply aborted | C | - | | | | - | | orchestration=fw-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.511 | Kubernetes rootca update auto-apply start | C | - | | | | - | | orchestration=kube-rootca-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.512 | Kubernetes rootca update auto-apply in progress | C | - | | | | - | | orchestration=kube-rootca-update | | - 
+-------------------+-------------------------------------------------------+----------+ - | 900.513 | Firmware Kubernetes rootca update auto-apply rejected | C | - | | | | - | | orchestration=kube-rootca-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.514 | Kubernetes rootca update auto-apply cancelled | C | - | | | | - | | orchestration=kube-rootca-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.515 | Kubernetes rootca update auto-apply failed | C | - | | | | - | | orchestration=kube-rootca-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.516 | Kubernetes rootca update auto-apply completed | C | - | | | | - | | orchestration=kube-rootca-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.517 | Kubernetes rootca update auto-apply abort | C | - | | | | - | | orchestration=kube-rootca-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.518 | Kubernetes rootca update auto-apply aborting | C | - | | | | - | | orchestration=kube-rootca-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.519 | Kubernetes rootca update auto-apply abort rejected | C | - | | | | - | | orchestration=kube-rootca-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.520 | Kubernetes rootca update auto-apply abort failed | C | - | | | | - | | orchestration=kube-rootca-update | | - +-------------------+-------------------------------------------------------+----------+ - | 900.521 | Kubernetes rootca update auto-apply aborted | C | - | | | | - | | orchestration=kube-rootca-update | | - +-------------------+-------------------------------------------------------+----------+ \ No newline at end of file diff --git a/doc/source/fault-mgmt/kubernetes/alarm-messages-overview-19c242d3d151.rst b/doc/source/fault-mgmt/kubernetes/alarm-messages-overview-19c242d3d151.rst new file mode 100644 index 000000000..39a5ef256 --- /dev/null +++ b/doc/source/fault-mgmt/kubernetes/alarm-messages-overview-19c242d3d151.rst @@ -0,0 +1,7 @@ +.. _alarm-messages-overview-19c242d3d151: + +======================= +Alarm Messages Overview +======================= + +.. include:: /shared/_includes/alarm-messages-overview-9d087b0170cf.rest diff --git a/doc/source/fault-mgmt/kubernetes/fault-management-overview.rst b/doc/source/fault-mgmt/kubernetes/fault-management-overview.rst index 17ccea70b..60d0c59b6 100644 --- a/doc/source/fault-mgmt/kubernetes/fault-management-overview.rst +++ b/doc/source/fault-mgmt/kubernetes/fault-management-overview.rst @@ -9,19 +9,17 @@ Fault Management Overview An admin user can view |prod-long| fault management alarms and logs in order to monitor and respond to fault conditions. -See :ref:`Alarm Messages <100-series-alarm-messages>` for the list of +See :ref:`Alarm Messages <100-series-alarm-messages-starlingx>` for the list of alarms and :ref:`Customer Log Messages -<200-series-maintenance-customer-log-messages>` +<200-series-log-messages-starlingx>` for the list of customer logs reported by |prod|. You can access active and historical alarms, and customer logs using the CLI, GUI, REST APIs and |SNMP|. 
-To use the CLI, see -:ref:`Viewing Active Alarms Using the CLI -` -and :ref:`Viewing the Event Log Using the CLI -`. +To use the CLI, see :ref:`Viewing Active Alarms Using the CLI +` and :ref:`Viewing the Event Log Using the +CLI `. Using the GUI, you can obtain fault management information in a number of places. @@ -38,11 +36,9 @@ places. - **Admin** \> **Fault Management** \> **Active Alarms**—Alarms that are currently set, and require user action to clear them. For more - information about active alarms, see - :ref:`Viewing Active Alarms Using the CLI - ` - and :ref:`Deleting an Alarm Using the CLI - `. + information about active alarms, see :ref:`Viewing Active Alarms Using + the CLI ` and :ref:`Deleting an + Alarm Using the CLI `. - **Admin** \> **Fault Management** \> **Events**—The event log consolidates historical alarms that have occurred in the past, that @@ -50,8 +46,7 @@ places. logs. For more about the event log, which includes historical alarms and - customer logs, see - :ref:`Viewing the Event Log Using Horizon + customer logs, see :ref:`Viewing the Event Log Using Horizon `. - **Admin** \> **Fault Management** \> **Events Suppression**—Individual @@ -66,4 +61,4 @@ places. .. xreflink For more information, see |datanet-doc|: :ref:`The Data Network Topology View `. -To use SNMP, see :ref:`SNMP Overview `. \ No newline at end of file +To use SNMP, see :ref:`SNMP Overview `. diff --git a/doc/source/fault-mgmt/kubernetes/index-fault-kub-f45ef76b6f16.rst b/doc/source/fault-mgmt/kubernetes/index-fault-kub-f45ef76b6f16.rst index 1d613b2b9..c1460d627 100644 --- a/doc/source/fault-mgmt/kubernetes/index-fault-kub-f45ef76b6f16.rst +++ b/doc/source/fault-mgmt/kubernetes/index-fault-kub-f45ef76b6f16.rst @@ -108,14 +108,13 @@ Alarm messages .. toctree:: :maxdepth: 1 - 100-series-alarm-messages - 200-series-alarm-messages - 300-series-alarm-messages - 400-series-alarm-messages - 500-series-alarm-messages - 750-series-alarm-messages - 800-series-alarm-messages - 900-series-alarm-messages + alarm-messages-overview-19c242d3d151 + +.. toctree:: + :maxdepth: 1 + :glob: + + *-series-alarm-messages ************ Log messages @@ -124,6 +123,10 @@ Log messages .. toctree:: :maxdepth: 1 - 200-series-maintenance-customer-log-messages - 400-series-customer-log-messages - 900-series-orchestration-customer-log-messages + log-entries-overview-597c2c453680 + +.. toctree:: + :maxdepth: 1 + :glob: + + *-series-log-messages diff --git a/doc/source/fault-mgmt/kubernetes/log-entries-overview-597c2c453680.rst b/doc/source/fault-mgmt/kubernetes/log-entries-overview-597c2c453680.rst new file mode 100644 index 000000000..c9cf51c63 --- /dev/null +++ b/doc/source/fault-mgmt/kubernetes/log-entries-overview-597c2c453680.rst @@ -0,0 +1,9 @@ +.. _log-entries-overview-597c2c453680: + +==================== +Log Entries Overview +==================== + +.. include:: /shared/_includes/log-entries-overview-6728006a298f.rest + + diff --git a/doc/source/fault-mgmt/openstack/alarm-messages-overview-a250f214f65a.rst b/doc/source/fault-mgmt/openstack/alarm-messages-overview-a250f214f65a.rst new file mode 100644 index 000000000..74edde289 --- /dev/null +++ b/doc/source/fault-mgmt/openstack/alarm-messages-overview-a250f214f65a.rst @@ -0,0 +1,7 @@ +.. _alarm-messages-overview-a250f214f65a: + +======================= +Alarm Messages Overview +======================= + +.. 
include:: /shared/_includes/alarm-messages-overview-9d087b0170cf.rest diff --git a/doc/source/fault-mgmt/openstack/index-fault-os-a1a5cae095b3.rst b/doc/source/fault-mgmt/openstack/index-fault-os-a1a5cae095b3.rst index ef09ccc4c..877b22d5c 100644 --- a/doc/source/fault-mgmt/openstack/index-fault-os-a1a5cae095b3.rst +++ b/doc/source/fault-mgmt/openstack/index-fault-os-a1a5cae095b3.rst @@ -21,10 +21,14 @@ OpenStack alarm messages .. toctree:: :maxdepth: 1 - openstack-alarm-messages-300s - openstack-alarm-messages-400s - openstack-alarm-messages-700s - openstack-alarm-messages-800s + alarm-messages-overview-a250f214f65a + +.. toctree:: + :maxdepth: 1 + :glob: + + *-series-alarm-messages + ******************************* OpenStack customer log messages @@ -33,6 +37,10 @@ OpenStack customer log messages .. toctree:: :maxdepth: 1 - openstack-customer-log-messages-270s-virtual-machines - openstack-customer-log-messages-401s-services - openstack-customer-log-messages-700s-virtual-machines + log-entries-overview-4f1dde7286c2 + +.. toctree:: + :maxdepth: 1 + :glob: + + *-series-log-messages diff --git a/doc/source/fault-mgmt/openstack/log-entries-overview-4f1dde7286c2.rst b/doc/source/fault-mgmt/openstack/log-entries-overview-4f1dde7286c2.rst new file mode 100644 index 000000000..dae5a9adc --- /dev/null +++ b/doc/source/fault-mgmt/openstack/log-entries-overview-4f1dde7286c2.rst @@ -0,0 +1,8 @@ +.. _log-entries-overview-4f1dde7286c2: + +==================== +Log Entries Overview +==================== + +.. include:: /shared/_includes/log-entries-overview-6728006a298f.rest + diff --git a/doc/source/fault-mgmt/openstack/openstack-alarm-messages-300s.rst b/doc/source/fault-mgmt/openstack/openstack-alarm-messages-300s.rst deleted file mode 100644 index aa2243576..000000000 --- a/doc/source/fault-mgmt/openstack/openstack-alarm-messages-300s.rst +++ /dev/null @@ -1,110 +0,0 @@ - -.. slf1579788051430 -.. _alarm-messages-300s: - -===================== -Alarm Messages - 300s -===================== - -The system inventory and maintenance service reports system changes with -different degrees of severity. Use the reported alarms to monitor the overall -health of the system. - -For more information, see :ref:`Overview -`. - -In the following tables, the severity of the alarms is represented by one or -more letters, as follows: - -.. _alarm-messages-300s-ul-jsd-jkg-vp: - -- C: Critical - -- M: Major - -- m: Minor - -- W: Warning - -A slash-separated list of letters is used when the alarm can be triggered with -one of several severity levels. - -An asterisk \(\*\) indicates the management-affecting severity, if any. A -management-affecting alarm is one that cannot be ignored at the indicated -severity level or higher by using relaxed alarm rules during an orchestrated -patch or upgrade operation. - -Differences exist between the terminology emitted by some alarms and that used -in the |CLI|, GUI, and elsewhere in the documentation: - -.. _alarm-messages-300s-ul-dsf-dxn-bhb: - -- References to provider networks in alarms refer to data networks. - -- References to data networks in alarms refer to physical networks. - -- References to tenant networks in alarms refer to project networks. - - -.. _alarm-messages-300s-table-zrd-tg5-v5: - -.. table:: Table 1. 
Alarm Messages - :widths: auto - - +----------+-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | Alarm ID | Description | Severity | Proposed Repair Action | - + +-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | | Entity Instance ID | - +==========+=====================================================================================+==========+===================================================================================================+ - | 300.003 | Networking Agent not responding. | M\* | If condition persists, attempt to clear issue by administratively locking and unlocking the Host. | - + +-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | | host=.agent= | - +----------+-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | 300.004 | No enabled compute host with connectivity to provider network. | M\* | Enable compute hosts with required provider network connectivity. | - + +-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | | service=networking.providernet= | - +----------+-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | 300.005 | Communication failure detected over provider network x% for ranges y% on host z%. | M\* | Check neighbour switch port VLAN assignments. | - | | | | | - | | or | | | - | | | | | - | | Communication failure detected over provider network x% on host z%. | | | - + +-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | | host=.service=networking.providernet= | - +----------+-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | 300.010 | ML2 Driver Agent non-reachable | M\* | Monitor and if condition persists, contact next level of support. 
| - | | | | | - | | or | | | - | | | | | - | | ML2 Driver Agent reachable but non-responsive | | | - | | | | | - | | or | | | - | | | | | - | | ML2 Driver Agent authentication failure | | | - | | | | | - | | or | | | - | | | | | - | | ML2 Driver Agent is unable to sync Neutron database | | | - + +-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | | host=.ml2driver= | - +----------+-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | 300.012 | Openflow Controller connection failed. | M\* | Check cabling and far-end port configuration and status on adjacent equipment. | - + +-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | | host=.openflow-controller= | - +----------+-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | 300.013 | No active Openflow controller connections found for this network. | C, M\* | Check cabling and far-end port configuration and status on adjacent equipment. | - | | | | | - | | or | | | - | | | | | - | | One or more Openflow controller connections in disconnected state for this network. | | | - + +-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | | host=.openflow-network= | - +----------+-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | 300.015 | No active OVSDB connections found. | C\* | Check cabling and far-end port configuration and status on adjacent equipment. | - + +-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | | host= | - +----------+-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | 300.016 | Dynamic routing agent x% lost connectivity to peer y% | M\* | If condition persists, fix connectivity to peer. | - + +-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ - | | host=,agent=,bgp-peer= | - +----------+-------------------------------------------------------------------------------------+----------+---------------------------------------------------------------------------------------------------+ diff --git a/doc/source/fault-mgmt/openstack/openstack-alarm-messages-400s.rst b/doc/source/fault-mgmt/openstack/openstack-alarm-messages-400s.rst deleted file mode 100644 index 4be214b3c..000000000 --- a/doc/source/fault-mgmt/openstack/openstack-alarm-messages-400s.rst +++ /dev/null @@ -1,47 +0,0 @@ - -.. msm1579788069384 -.. 
_alarm-messages-400s: - -===================== -Alarm Messages - 400s -===================== - -.. include:: /_includes/openstack-alarm-messages-xxxs.rest - -.. _alarm-messages-400s-table-zrd-tg5-v5: - -.. table:: Table 1. Alarm Messages - :widths: auto - - +----------+------------------------------------------------------------------------------------------------------------------+----------+----------------------------------------------------------------------------------------------------+ - | Alarm ID | Description | Severity | Proposed Repair Action | - + +------------------------------------------------------------------------------------------------------------------+----------+----------------------------------------------------------------------------------------------------+ - | | Entity Instance ID | - +==========+==================================================================================================================+==========+====================================================================================================+ - | 400.001 | Service group failure; . | C/M/m\* | Contact next level of support. | - | | | | | - | | or | | | - | | | | | - | | Service group degraded; . | | | - | | | | | - | | or | | | - | | | | | - | | Service group warning; . | | | - | | | | | - + +------------------------------------------------------------------------------------------------------------------+----------+----------------------------------------------------------------------------------------------------+ - | | service_domain=.service_group=.host= | - +----------+------------------------------------------------------------------------------------------------------------------+----------+----------------------------------------------------------------------------------------------------+ - | 400.002 | Service group loss of redundancy; expected standby member but only standby member available. | M\* | Bring a controller node back in to service, otherwise contact next level of support. | - | | | | | - | | or | | | - | | | | | - | | Service group loss of redundancy; expected active member but no active members available. | | | - | | | | | - | | or | | | - | | | | | - | | Service group loss of redundancy; expected active member but only active member available. | | | - | | | | | - + +------------------------------------------------------------------------------------------------------------------+----------+----------------------------------------------------------------------------------------------------+ - | | service_domain=.service_group= | - +----------+------------------------------------------------------------------------------------------------------------------+----------+----------------------------------------------------------------------------------------------------+ - diff --git a/doc/source/fault-mgmt/openstack/openstack-alarm-messages-700s.rst b/doc/source/fault-mgmt/openstack/openstack-alarm-messages-700s.rst deleted file mode 100644 index beb3bd739..000000000 --- a/doc/source/fault-mgmt/openstack/openstack-alarm-messages-700s.rst +++ /dev/null @@ -1,87 +0,0 @@ - -.. uxo1579788086872 -.. _alarm-messages-700s: - -===================== -Alarm Messages - 700s -===================== - -.. include:: /_includes/openstack-alarm-messages-xxxs.rest - -.. _alarm-messages-700s-table-zrd-tg5-v5: - -.. table:: Table 1. 
Alarm Messages - :widths: auto - - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | Alarm ID | Description | Severity | Proposed Repair Action | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | Entity Instance ID | - +==========+=====================================================================================================================+==========+===========================================================================================================+ - | 700.001 | Instance owned by has failed on host . | C\* | The system will attempt recovery; no repair action required. | - | | | | | - | | Instance owned by has failed to schedule. | | | - | | | | | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.002 | Instance owned by is paused on host . | C\* | Unpause the instance. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.003 | Instance owned by is suspended on host . | C\* | Resume the instance. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.004 | Instance owned by is stopped on host . | C\* | Start the instance. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.005 | Instance owned by is rebooting on host . | C\* | Wait for reboot to complete; if problem persists contact next level of support. 
| - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.006 | Instance owned by is rebuilding on host . | C\* | Wait for rebuild to complete; if problem persists contact next level of support. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.007 | Instance owned by is evacuating from host . | C\* | Wait for evacuate to complete; if problem persists contact next level of support. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.008 | Instance owned by is live migrating from host . | W\* | Wait for live migration to complete; if problem persists contact next level of support. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.009 | Instance owned by is cold migrating from host . | C\* | Wait for cold migration to complete; if problem persists contact next level of support. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.010 | Instance owned by has been cold-migrated to host waiting for confirmation.| C\* | Confirm or revert cold-migrate of instance. 
| - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.011 | Instance owned by is reverting cold migrate to host . | C\* | Wait for cold migration revert to complete; if problem persists contact next level of support. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.012 | Instance owned by is resizing on host . | C\* | Wait for resize to complete; if problem persists contact next level of support. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.013 | Instance owned by has been resized on host waiting for confirmation. | C\* | Confirm or revert resize of instance. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.014 | Instance owned by is reverting resize on host . | C\* | Wait for resize revert to complete; if problem persists contact next level of support. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | tenant=.instance= | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.016 | Multi-Node Recovery Mode. | m\* |Wait for the system to exit out of this mode. 
| - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | subsystem=vim | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 700.017 | Server group policy was not satisfied. | m\* | Migrate instances in an attempt to satisfy the policy; if problem persists contact next level of support. | - + +---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | server-group | - +----------+---------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ diff --git a/doc/source/fault-mgmt/openstack/openstack-alarm-messages-800s.rst b/doc/source/fault-mgmt/openstack/openstack-alarm-messages-800s.rst deleted file mode 100644 index 954f5a77d..000000000 --- a/doc/source/fault-mgmt/openstack/openstack-alarm-messages-800s.rst +++ /dev/null @@ -1,81 +0,0 @@ - -.. tsh1579788106505 -.. _alarm-messages-800s: - -===================== -Alarm Messages - 800s -===================== - -.. include:: /_includes/openstack-alarm-messages-xxxs.rest - -.. _alarm-messages-800s-table-zrd-tg5-v5: - -.. table:: Table 1. Alarm Messages - :widths: auto - - +----------+-------------------------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | Alarm ID | Description | Severity | Proposed Repair Action | - + +-------------------------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | Entity Instance ID | - +==========+=====================================================================================================================================+==========+===========================================================================================================+ - | 800.002 | Image storage media is full: There is not enough disk space on the image storage media. | W\* | If problem persists, contact next level of support. | - | | | | | - | | or | | | - | | | | | - | | Instance snapshot failed: There is not enough disk space on the image storage media. | | | - | | | | | - | | or | | | - | | | | | - | | Supplied () and generated from uploaded image () did not match. Setting image status to 'killed'. | | | - | | | | | - | | or | | | - | | | | | - | | Error in store configuration. Adding images to store is disabled. | | | - | | | | | - | | or | | | - | | | | | - | | Forbidden upload attempt: . | | | - | | | | | - | | or | | | - | | | | | - | | Insufficient permissions on image storage media: . | | | - | | | | | - | | or | | | - | | | | | - | | Denying attempt to upload image larger than bytes. 
| | | - | | | | | - | | or | | | - | | | | | - | | Denying attempt to upload image because it exceeds the quota: . | | | - | | | | | - | | or | | | - | | | | | - | | Received HTTP error while uploading image . | | | - | | | | | - | | or | | | - | | | | | - | | Client disconnected before sending all data to backend. | | | - | | | | | - | | or | | | - | | | | | - | | Failed to upload image . | | | - | | | | | - + +-------------------------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | image=, instance= | - | | | - | | or | - | | | - | | tenant=, instance= | - +----------+-------------------------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 800.100 | Storage Alarm Condition: | M\* | Reduce the I/O load on the Cinder LVM backend. Use Cinder QoS mechanisms on high usage volumes. | - | | | | | - | | Cinder I/O Congestion is above normal range and is building. | | | - + +-------------------------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | cinder_io_monitor | - +----------+-------------------------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | 800.101 | Storage Alarm Condition: | C\* | Reduce the I/O load on the Cinder LVM backend. Cinder actions may fail until congestion is reduced. | - | | | | Use Cinder QoS mechanisms on high usage volumes. | - | | Cinder I/O Congestion is high and impacting guest performance. | | | - + +-------------------------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ - | | cinder_io_monitor | - +----------+-------------------------------------------------------------------------------------------------------------------------------------+----------+-----------------------------------------------------------------------------------------------------------+ diff --git a/doc/source/fault-mgmt/openstack/openstack-customer-log-messages-270s-virtual-machines.rst b/doc/source/fault-mgmt/openstack/openstack-customer-log-messages-270s-virtual-machines.rst deleted file mode 100644 index b4dccccf9..000000000 --- a/doc/source/fault-mgmt/openstack/openstack-customer-log-messages-270s-virtual-machines.rst +++ /dev/null @@ -1,38 +0,0 @@ - -.. ftb1579789103703 -.. _customer-log-messages-270s-virtual-machines: - -============================================= -Customer Log Messages 270s - Virtual Machines -============================================= - -.. include:: /_includes/openstack-customer-log-messages-xxxs.rest - -.. _customer-log-messages-270s-virtual-machines-table-zgf-jvw-v5: - -.. table:: Table 1. 
Customer Log Messages - Virtual Machines
-   :widths: auto
-
-   +-----------+----------------------------------------------------------------------------------+----------+
-   | Log ID    | Description                                                                      | Severity |
-   +           +----------------------------------------------------------------------------------+----------+
-   |           | Entity Instance ID                                                               |          |
-   +===========+==================================================================================+==========+
-   | 270.101   | Host compute services failure\[, reason = \]                                     | C        |
-   |           |                                                                                  |          |
-   |           | tenant=.instance=                                                                |          |
-   +-----------+----------------------------------------------------------------------------------+----------+
-   | 270.102   | Host compute services enabled                                                    | C        |
-   |           |                                                                                  |          |
-   |           | tenant=.instance=                                                                |          |
-   +-----------+----------------------------------------------------------------------------------+----------+
-   | 270.103   | Host compute services disabled                                                   | C        |
-   |           |                                                                                  |          |
-   |           | tenant=.instance=                                                                |          |
-   +-----------+----------------------------------------------------------------------------------+----------+
-   | 275.001   | Host hypervisor is now -                                                         | C        |
-   |           |                                                                                  |          |
-   |           | tenant=.instance=                                                                |          |
-   +-----------+----------------------------------------------------------------------------------+----------+
-
-See also :ref:`Customer Log Messages 700s - Virtual Machines `
diff --git a/doc/source/fault-mgmt/openstack/openstack-customer-log-messages-401s-services.rst b/doc/source/fault-mgmt/openstack/openstack-customer-log-messages-401s-services.rst
deleted file mode 100644
index b3fdb3078..000000000
--- a/doc/source/fault-mgmt/openstack/openstack-customer-log-messages-401s-services.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-
-.. hwr1579789203684
-.. _customer-log-messages-401s-services:
-
-=====================================
-Customer Log Messages 401s - Services
-=====================================
-
-.. include:: /_includes/openstack-customer-log-messages-xxxs.rest
-
-.. _customer-log-messages-401s-services-table-zgf-jvw-v5:
-
-.. table:: Table 1. Customer Log Messages - Services
-   :widths: auto
-
-   +-----------+--------------------------------------------------------------------------------------------------------------------+----------+
-   | Log ID    | Description                                                                                                        | Severity |
-   +           +--------------------------------------------------------------------------------------------------------------------+----------+
-   |           | Entity Instance ID                                                                                                 |          |
-   +===========+====================================================================================================================+==========+
-   | 401.001   | Service group state change from to on host                                                                        | C        |
-   |           |                                                                                                                    |          |
-   |           | service_domain=.service_group=.host=                                                                               |          |
-   +-----------+--------------------------------------------------------------------------------------------------------------------+----------+
-   | 401.002   | Service group loss of redundancy; expected standby member but no standby members available                        | C        |
-   |           |                                                                                                                    |          |
-   |           | or                                                                                                                 |          |
-   |           |                                                                                                                    |          |
-   |           | Service group loss of redundancy; expected standby member but only standby member(s) available                    |          |
-   |           |                                                                                                                    |          |
-   |           | or                                                                                                                 |          |
-   |           |                                                                                                                    |          |
-   |           | Service group has no active members available; expected active member(s).                                         |          |
-   |           |                                                                                                                    |          |
-   |           | or                                                                                                                 |          |
-   |           |                                                                                                                    |          |
-   |           | Service group loss of redundancy; expected active member(s) but only active member(s) available.
| | - | | | | - | | service_domain=.service_group= | | - +-----------+--------------------------------------------------------------------------------------------------------------------+----------+ diff --git a/doc/source/fault-mgmt/openstack/openstack-customer-log-messages-700s-virtual-machines.rst b/doc/source/fault-mgmt/openstack/openstack-customer-log-messages-700s-virtual-machines.rst deleted file mode 100644 index 28a50d318..000000000 --- a/doc/source/fault-mgmt/openstack/openstack-customer-log-messages-700s-virtual-machines.rst +++ /dev/null @@ -1,480 +0,0 @@ - -.. qfy1579789227230 -.. _customer-log-messages-700s-virtual-machines: - -============================================= -Customer Log Messages 700s - Virtual Machines -============================================= - -.. include:: /_includes/openstack-customer-log-messages-xxxs.rest - -.. _customer-log-messages-700s-virtual-machines-table-zgf-jvw-v5: - -.. table:: Table 1. Customer Log Messages - :widths: auto - - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | Log ID | Description | Severity | - + +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | | Entity Instance ID | | - +==========+====================================================================================================================================================================================+==========+ - | 700.101 | Instance is enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.102 | Instance owned by has failed\[, reason = \]. 
| C | - | | Instance owned by has failed to schedule\[, reason = \] | | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.103 | Create issued by or by the system against owned by | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.104 | Creating instance owned by | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.105 | Create rejected for instance \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.106 | Create cancelled for instance \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.107 | Create failed for instance \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.108 | Instance owned by has been created | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.109 | Delete issued by or by the system against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.110 | Deleting instance owned by | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.111 | Delete rejected for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.112 | Delete cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.113 | Delete failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - 
+----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.114 | Deleted instance owned by | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.115 | Pause issued by or by the system against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.116 | Pause inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.117 | Pause rejected for instance enabled on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.118 | Pause cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.119 | Pause failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.120 | Pause complete for instance now paused on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.121 | Unpause issued by or by the system against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.122 | Unpause inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.123 | Unpause rejected for instance paused on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.124 | Unpause cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - 
+----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.125 | Unpause failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.126 | Unpause complete for instance now enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.127 | Suspend issued by or by the system> against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.128 | Suspend inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.129 | Suspend rejected for instance enabled on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.130 | Suspend cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.131 | Suspend failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.132 | Suspend complete for instance now suspended on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.133 | Resume issued by or by the system against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.134 | Resume inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.135 | Resume rejected for instance suspended on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - 
+----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.136 | Resume cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.137 | Resume failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.138 | Resume complete for instance now enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.139 | Start issued by or by the system against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.140 | Start inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.141 | Start rejected for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.142 | Start cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.143 | Start failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.144 | Start complete for instance now enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.145 | Stop issued by \ or by the system or by the instance against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.146 | Stop inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - 
+----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.147 | Stop rejected for instance enabled on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.148 | Stop cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.149 | Stop failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.150 | Stop complete for instance now disabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.151 | Live-Migrate issued by or by the system against instance owned by from host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.152 | Live-Migrate inprogress for instance from host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.153 | Live-Migrate rejected for instance now on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.154 | Live-Migrate cancelled for instance now on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.155 | Live-Migrate failed for instance now on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.156 | Live-Migrate complete for instance now enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.157 | Cold-Migrate issued by or by the system against instance owned by from host \[, reason = \] | C 
| - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.158 | Cold-Migrate inprogress for instance from host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.159 | Cold-Migrate rejected for instance now on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.160 | Cold-Migrate cancelled for instance now on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.161 | Cold-Migrate failed for instance now on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.162 | Cold-Migrate complete for instance now enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.163 | Cold-Migrate-Confirm issued by or by the system against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.164 | Cold-Migrate-Confirm inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.165 | Cold-Migrate-Confirm rejected for instance now enabled on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.166 | Cold-Migrate-Confirm cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.167 | Cold-Migrate-Confirm failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 
700.168 | Cold-Migrate-Confirm complete for instance enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.169 | Cold-Migrate-Revert issued by or by the system\> against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.170 | Cold-Migrate-Revert inprogress for instance from host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.171 | Cold-Migrate-Revert rejected for instance now on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.172 | Cold-Migrate-Revert cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.173 | Cold-Migrate-Revert failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.174 | Cold-Migrate-Revert complete for instance now enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.175 | Evacuate issued by or by the system against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.176 | Evacuating instance owned by from host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.177 | Evacuate rejected for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.178 | Evacuate cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - 
+----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.179 | Evacuate failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.180 | Evacuate complete for instance now enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.181 | Reboot <\(soft-reboot\) or \(hard-reboot\)> issued by or by the system or by the instance against instance owned by | C | - | | on host \[, reason = \] | | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.182 | Reboot inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.183 | Reboot rejected for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.184 | Reboot cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.185 | Reboot failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.186 | Reboot complete for instance now enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.187 | Rebuild issued by or by the system against instance using image on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.188 | Rebuild inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.189 | Rebuild rejected for instance on host \[, reason = \] | C 
| - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.190 | Rebuild cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.191 | Rebuild failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.192 | Rebuild complete for instance now enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.193 | Resize issued by or by the system against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.194 | Resize inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.195 | Resize rejected for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.196 | Resize cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.197 | Resize failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.198 | Resize complete for instance enabled on host waiting for confirmation | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.199 | Resize-Confirm issued by or by the system against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.200 | Resize-Confirm inprogress for instance on 
host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.201 | Resize-Confirm rejected for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.202 | Resize-Confirm cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.203 | Resize-Confirm failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.204 | Resize-Confirm complete for instance enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.205 | Resize-Revert issued by or by the system against instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.206 | Resize-Revert inprogress for instance on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.207 | Resize-Revert rejected for instance owned by on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.208 | Resize-Revert cancelled for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.209 | Resize-Revert failed for instance on host \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.210 | Resize-Revert complete for instance enabled on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.214 | Instance has been 
renamed to owned by on host | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.215 | Guest Health Check failed for instance \[, reason = \] | C | - | | | | - | | tenant=.instance= | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.216 | Entered Multi-Node Recovery Mode | C | - | | | | - | | subsystem-vim | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - | 700.217 | Exited Multi-Node Recovery Mode | C | - | | | | - | | subsystem-vim | | - +----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------+ - -See also :ref:`Customer Log Messages 270s - Virtual Machines `
diff --git a/doc/source/security/kubernetes/alarm-expiring-soon-and-expired-certificates-baf5b8f73009.rst b/doc/source/security/kubernetes/alarm-expiring-soon-and-expired-certificates-baf5b8f73009.rst
index 22863c04b..148edfbb4 100644
--- a/doc/source/security/kubernetes/alarm-expiring-soon-and-expired-certificates-baf5b8f73009.rst
+++ b/doc/source/security/kubernetes/alarm-expiring-soon-and-expired-certificates-baf5b8f73009.rst
@@ -147,4 +147,4 @@ issues with cert-manager auto-renewal of a certificate with :command:`kubectl
 
 .. seealso::
 
-    :ref:`500-series-alarm-messages`
+    :ref:`500-series-alarm-messages-starlingx`
diff --git a/doc/source/shared/_includes/alarm-messages-overview-9d087b0170cf.rest b/doc/source/shared/_includes/alarm-messages-overview-9d087b0170cf.rest
new file mode 100644
index 000000000..7269fadb7
--- /dev/null
+++ b/doc/source/shared/_includes/alarm-messages-overview-9d087b0170cf.rest
@@ -0,0 +1,18 @@
+The system inventory and maintenance service reports system changes with
+different degrees of severity. Use the reported alarms to monitor the overall
+health of the system.
+
+Alarm messages are numerically coded by the type of alarm.
+
+A management-affecting alarm is one that cannot be ignored at the indicated
+severity level or higher by using relaxed alarm rules during an orchestrated
+patch or upgrade operation.
+
+For more information, see :ref:`fault-management-overview`.
+
+.. note::
+
+   **Degrade Affecting Severity**: Critical indicates a node will be degraded if
+   the alarm reaches a critical level.
+
+
diff --git a/doc/source/shared/_includes/log-entries-overview-6728006a298f.rest b/doc/source/shared/_includes/log-entries-overview-6728006a298f.rest
new file mode 100644
index 000000000..de3325959
--- /dev/null
+++ b/doc/source/shared/_includes/log-entries-overview-6728006a298f.rest
@@ -0,0 +1,16 @@
+The Customer Logs include events that do not require immediate user action.
+
+Events in the Customer Logs are reported with one of the following
+severity levels:
+
+* Critical
+
+* Major
+
+* Minor
+
+* Warning
+
+* NA (Not applicable)
+
+
diff --git a/doc/source/updates/kubernetes/configure-firmware-update-orchestration.rst b/doc/source/updates/kubernetes/configure-firmware-update-orchestration.rst
index b69084610..864826db0 100644
--- a/doc/source/updates/kubernetes/configure-firmware-update-orchestration.rst
+++ b/doc/source/updates/kubernetes/configure-firmware-update-orchestration.rst
@@ -13,7 +13,7 @@ You can configure *Firmware Update Orchestration Strategy* using the
    Management-affecting alarms cannot be ignored using relaxed alarm rules
    during an orchestrated firmware update operation. For a list of
    management-affecting alarms, see |fault-doc|:
-   :ref:`Alarm Messages <100-series-alarm-messages>`. To display
+   :ref:`Alarm Messages <100-series-alarm-messages-starlingx>`. To display
    management-affecting active alarms, use the following command:
 
 .. code-block:: none
diff --git a/doc/source/updates/kubernetes/configuring-kubernetes-update-orchestration.rst b/doc/source/updates/kubernetes/configuring-kubernetes-update-orchestration.rst
index 61b32793c..a88f59619 100644
--- a/doc/source/updates/kubernetes/configuring-kubernetes-update-orchestration.rst
+++ b/doc/source/updates/kubernetes/configuring-kubernetes-update-orchestration.rst
@@ -19,7 +19,7 @@ You can configure *Kubernetes Version Upgrade Orchestration Strategy* using the
    Management-affecting alarms cannot be ignored using relaxed alarm rules
    during an orchestrated Kubernetes version upgrade operation. For a list
    of management-affecting alarms, see |fault-doc|: :ref:`Alarm Messages
-   <100-series-alarm-messages>`. To display management-affecting active
+   <100-series-alarm-messages-starlingx>`. To display management-affecting active
    alarms, use the following command:
 
 .. code-block:: none
diff --git a/doc/source/updates/kubernetes/configuring-update-orchestration.rst b/doc/source/updates/kubernetes/configuring-update-orchestration.rst
index df83bc810..a358d0b38 100644
--- a/doc/source/updates/kubernetes/configuring-update-orchestration.rst
+++ b/doc/source/updates/kubernetes/configuring-update-orchestration.rst
@@ -18,7 +18,7 @@ Management** in the left-hand pane.
    Management-affecting alarms cannot be ignored at the indicated severity
    level or higher by using relaxed alarm rules during an orchestrated update
    operation. For a list of management-affecting alarms, see |fault-doc|:
-   :ref:`Alarm Messages <100-series-alarm-messages>`. To display
+   :ref:`Alarm Messages <100-series-alarm-messages-starlingx>`. To display
    management-affecting active alarms, use the following command:
 
 .. code-block:: none
diff --git a/doc/source/updates/kubernetes/firmware-update-orchestration-using-the-cli.rst b/doc/source/updates/kubernetes/firmware-update-orchestration-using-the-cli.rst
index 88c144227..7f269782d 100644
--- a/doc/source/updates/kubernetes/firmware-update-orchestration-using-the-cli.rst
+++ b/doc/source/updates/kubernetes/firmware-update-orchestration-using-the-cli.rst
@@ -25,7 +25,7 @@ About this task
    level or higher by using relaxed alarm rules during an orchestrated
    firmware update operation. For a list of management-affecting
    alarms, see |fault-doc|: :ref:`Alarm Messages
-   <100-series-alarm-messages>`. To display
+   <100-series-alarm-messages-starlingx>`. To display management-affecting active
    alarms, use the following command:
 
 .. code-block:: none
diff --git a/doc/source/updates/kubernetes/performing-an-orchestrated-upgrade-using-the-cli.rst b/doc/source/updates/kubernetes/performing-an-orchestrated-upgrade-using-the-cli.rst
index 6464077f2..8cb115e45 100644
--- a/doc/source/updates/kubernetes/performing-an-orchestrated-upgrade-using-the-cli.rst
+++ b/doc/source/updates/kubernetes/performing-an-orchestrated-upgrade-using-the-cli.rst
@@ -42,7 +42,7 @@ controller host has been manually upgraded and returned to a stability state.
    Management-affecting alarms cannot be ignored at the indicated severity
    level or higher by using relaxed alarm rules during an orchestrated
    upgrade operation. For a list of management-affecting alarms, see |fault-doc|:
-   :ref:`Alarm Messages `. To display
+   :ref:`alarm-messages-overview-19c242d3d151`. To display
    management-affecting active alarms, use the following command:
 
 .. code-block:: none
diff --git a/doc/source/updates/kubernetes/performing-an-orchestrated-upgrade.rst b/doc/source/updates/kubernetes/performing-an-orchestrated-upgrade.rst
index 3c03f30d7..e6b84d222 100644
--- a/doc/source/updates/kubernetes/performing-an-orchestrated-upgrade.rst
+++ b/doc/source/updates/kubernetes/performing-an-orchestrated-upgrade.rst
@@ -17,7 +17,7 @@ remaining nodes of the |prod|.
    Management-affecting alarms cannot be ignored at the indicated severity
    level or higher by using relaxed alarm rules during an orchestrated
    upgrade operation. For a list of management-affecting alarms, see |fault-doc|:
-   :ref:`Alarm Messages `. To display
+   :ref:`alarm-messages-overview-19c242d3d151`. To display
    management-affecting active alarms, use the following command:
 
 .. code-block:: none
diff --git a/doc/source/updates/kubernetes/update-orchestration-cli.rst b/doc/source/updates/kubernetes/update-orchestration-cli.rst
index 887a6bafa..d18dcfa19 100644
--- a/doc/source/updates/kubernetes/update-orchestration-cli.rst
+++ b/doc/source/updates/kubernetes/update-orchestration-cli.rst
@@ -23,7 +23,7 @@ interface dialog, described in :ref:`Configuring Update Orchestration
    Management-affecting alarms cannot be ignored at the indicated severity
    level or higher by using relaxed alarm rules during an orchestrated update
    operation. For a list of management-affecting alarms, see |fault-doc|:
-   :ref:`Alarm Messages <100-series-alarm-messages>`. To display
+   :ref:`Alarm Messages <100-series-alarm-messages-starlingx>`. To display
    management-affecting active alarms, use the following command:
 
 .. code-block:: none
diff --git a/get-remote-files.sh b/get-remote-files.sh
index 83d109fed..9a5b5f109 100755
--- a/get-remote-files.sh
+++ b/get-remote-files.sh
@@ -1,15 +1,14 @@
 #!/usr/bin/env bash
-# Fetch arbitrary files from a remote repo for processing/
+# Fetch arbitrary files from a remote location for processing/
 # inclusion in local build.
 
 message () { echo -e "$@" 1>&2; }
 
 usage_error () {
-    message "\nUsage: $0 config-file -o [-f -b]
-     contains fetch and save locations for files
+    message "\nUsage: $0 -c config-file -o [-f -b]
+    -c contains fetch and save locations for files
     -o sets the output path to the save locations in  or to STDOUT
-       **Note** Do not set to \"stdout\" if you are downloading a binary file.
     -f optionally forces existing output files to be overwritten
     -b skips branch lookup. Use this if downloading from a non-git URL\n"
     exit 1
@@ -65,6 +64,7 @@ fetch_files ()
 {
         "file")
             _outfile="$common_target${remote_files[$f]}"
+            if [ ! -d "$(dirname "$_outfile")" ]; then mkdir -p "$(dirname "$_outfile")"; fi
             ;;
         "stdout")
             _outfile="-"
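With the -c flag now explicit, the fetch step used by the docs build is simply
`./get-remote-files.sh -c templates/events.sh -o file -f`: it downloads
events.yaml from the fault repo and saves it under tmp/ (the common_target set
in templates/events.sh), with -f overwriting any copy left over from a previous
run. This is the exact invocation added to tox.ini at the end of this change.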
diff --git a/parser.py b/parser.py
new file mode 100755
index 000000000..f7db87c3c
--- /dev/null
+++ b/parser.py
@@ -0,0 +1,246 @@
+
+import argparse
+import yaml
+from yaml.loader import SafeLoader
+import re
+import os
+
+def parseLayoutFile():
+    layoutFile = open(args.layout,"r")
+    data = layoutFile.read()
+    layoutFile.close()
+    # findall alternates between quoted and unquoted segments; indexes 0, 2
+    # and 4 are the head_format, record_format and foot_format blocks.
+    regex= r'(?<=\")([\s\S]*?)(?=\")'
+    data = re.findall(regex, data)
+    if(len(data) != 5):
+        raise Exception('layout file has a different structure from expected')
+    return [data[0], data[2], data[4]]
+
+
+def parseEventFile():
+    eventFile = open(args.event, "r")
+    data = yaml.load(eventFile, Loader=SafeLoader)
+    eventFile.close()
+    return data
+
+def alignMultiLine(characterCounter, multilineString):
+    # Indent the continuation lines of a multi-line value so they stay
+    # aligned with the list-table cell they are substituted into.
+    index = 0
+    strings = multilineString.split('\n')
+    for string in strings:
+        if(index !=0):
+            string = string.lstrip()
+            string = "\n"+"".join(" " for i in range(characterCounter)) + string
+        strings[index] = string
+        index+=1
+    return "\n".join(strings)
+
+def replaceSymbols(text, oldSymbols, newSymbols):
+    counter = 0
+    for oldSymbol in oldSymbols:
+        if(len(newSymbols[counter]) - len(oldSymbols[counter]) > 0):
+            text = str(text).replace(oldSymbol, " "+newSymbols[counter]+" ")
+        else:
+            text = str(text).replace(oldSymbol, newSymbols[counter])
+        counter+=1
+    return text
+
+def getTitleLength(header):
+    idx1 = header.find("")+14
+    idx2 = header.find("")
+    return idx2 - idx1
+
+# def getContext(context):
+#     if(len(context) == 3):
+#         return "starlingx-openstack-empty"
+#     elif(len(context) == 2):
+#         if("starlingx" in context):
+#             if("openstack" in context):
+#                 return "starlingx-openstack"
+#             else:
+#                 return "starlingx-empty"
+#         else:
+#             return "openstack-empty"
+#     else:
+#         return context[0]
+
+# RS - 11-16-22 - generalize getContext to all runtime scenarios
+def getContext(context):
+    return '-'.join(map(str, context))
+
+def seriesFilter(key, serie):
+    # Series that are not a multiple of 100 (for example 750) match on the
+    # tens digit, so 750.xxx events do not land in the 700 series; x00
+    # series match on the hundreds digit.
+    if(float(serie)%100 > 1):
+        return (float(key)//10) == (float(serie)//10)
+    else:
+        return (float(key)//100) == (float(serie)//100)
+
+def seriesFile(layout, events, types, fileExtension, oldSymbols, newSymbols, products, sort):
+    series = args.series.split(",")
+    for serie in series:
+        matchingKeys = [key for key in events.keys() if seriesFilter(key, serie) and events[key]["Type"] in types and events[key]["Context"] in products]
+        if(sort):
+            matchingKeys.sort()
+        if(len(matchingKeys) > 0):
+            serieFile = open(args.outputPath+str(serie)+"-series-"+'-'.join(types).lower()+"-messages"+fileExtension, "w")
+            header = layout[0]
+            header = header.replace("",serie).replace("",serie)
+            score = "".join(args.titleSymbol for i in range(getTitleLength(header)))
+            header = header.replace("", score)
+            header = header.replace("", getContext(products))
+            header = header.replace("", score)
+            serieFile.write(header)
+            for matchingKey in matchingKeys:
+                body = layout[1]
+                body = body.replace("", format(matchingKey, '.3f'))
+                fields = re.findall('(?<=\<)(.*?)(?=\>)', body)
+                for field in fields:
+                    if(field in events[matchingKey]):
+                        if(type(events[matchingKey][field]) == type(events)):
+                            value = []
+                            for subkey in events[matchingKey][field]:
+                                value.append(events[matchingKey][field][subkey])
+                            events[matchingKey][field] = "\n".join(value)
+                        else:
+                            events[matchingKey][field] = (re.sub(r'\n\s*\n','\n',str(events[matchingKey][field]),flags=re.MULTILINE))
+                        if(oldSymbols != None and newSymbols != None):
+                            events[matchingKey][field] = replaceSymbols(events[matchingKey][field], oldSymbols, newSymbols)
+                        if('\n' in events[matchingKey][field]):
+                            index = body.index('<'+field+'>')
+                            characterCounter = 0
+                            while(body[index] != '\n'):
+                                index-=1
+                                characterCounter+=1
+                            body = body.replace('<'+field+'>',alignMultiLine(characterCounter-1, events[matchingKey][field]))
+                        else:
+                            body = body.replace('<'+field+'>',events[matchingKey][field])
+                    else:
+                        body = body.replace('<'+field+'>','N/A')
+                serieFile.write(body)
+            footer = layout[2]
+            serieFile.write(footer)
+            serieFile.close()
+
+def recordsFile(layout, events, fileExtension, oldSymbols, newSymbols, sort):
+    records = args.records.split(",")
+    if(len(records) > 0):
+        matchingKeys = [float(record) for record in records for key in events.keys() if float(key) == float(record)]
+        if(sort):
+            matchingKeys.sort()
+        if(len(matchingKeys) > 0):
+            serieFile = open(args.outputPath+args.fileName+fileExtension, "w")
+            header = layout[0]
+            score = "".join(args.titleSymbol for i in range(getTitleLength(header)))
+            header = header.replace("", score)
+            header = header.replace("", score)
+            serieFile.write(header)
+            for matchingKey in matchingKeys:
+                body = layout[1]
+                body = body.replace("", format(matchingKey, '.3f'))
+                fields = re.findall('(?<=\<)(.*?)(?=\>)', body)
+                for field in fields:
+                    if(field in events[matchingKey]):
+                        if(type(events[matchingKey][field]) == type(events)):
+                            value = []
+                            for subkey in events[matchingKey][field]:
+                                value.append(events[matchingKey][field][subkey])
+                            events[matchingKey][field] = "\n".join(value)
+                        else:
+                            events[matchingKey][field] = (re.sub(r'\n\s*\n','\n',str(events[matchingKey][field]),flags=re.MULTILINE))
+                        if(oldSymbols != None and newSymbols != None):
+                            events[matchingKey][field] = replaceSymbols(events[matchingKey][field], oldSymbols, newSymbols)
+                        if('\n' in events[matchingKey][field]):
+                            index = body.index('<'+field+'>')
+                            characterCounter = 0
+                            while(body[index] != '\n'):
+                                index-=1
+                                characterCounter+=1
+                            body = body.replace('<'+field+'>',alignMultiLine(characterCounter-1, events[matchingKey][field]))
+                        else:
+                            body = body.replace('<'+field+'>',events[matchingKey][field])
+                    else:
+                        body = body.replace('<'+field+'>','N/A')
+                serieFile.write(body)
+            footer = layout[2]
+            serieFile.write(footer)
+            serieFile.close()
+
+
+parser = argparse.ArgumentParser()
+invalidArguments = False
+parser.add_argument("-l", "--layout", required=True, help = "path for the layout file")
+parser.add_argument("-e", "--event", required=True, help = "path for the events.yaml file")
+parser.add_argument("-s", "--series", help = "list of the desired series")
+parser.add_argument("-ts", "--titleSymbol", required=True, help = "Symbol used for the title over/underline")
+parser.add_argument("-replace", "--replace", required=False, help = "replaces a symbol with another")
+parser.add_argument("-type", "--type", help = "type can be Alarm or Log, it also can be both")
+parser.add_argument("-outputPath", "--outputPath", required=True, help="Path where the output will be saved")
+parser.add_argument("-records", "--records", help="list of the desired records")
+parser.add_argument("-fileName", "--fileName", help="file name for the output file")
+parser.add_argument("-product", "--product", help="product type for filtering")
+parser.add_argument("-sort", "--sort", help="argument that defines if the output will be sorted")
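+
+# Example invocation (illustrative; the authoritative invocations are the
+# ones added to tox.ini below):
+#   python3 parser.py -l templates/alarms_template.rst -e tmp/events.yaml \
+#       -s 100,200 -ts = -type Alarm -outputPath doc/source/fault-mgmt/kubernetes/ \
+#       -sort Yes -product starlingx -replace "|,OR"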
or records as an argument") +if(args.series != None and args.product == None): + invalidArguments = True + print("Expected product as an argument") +if(args.series != None and args.type == None): + invalidArguments=True + print("Expected type as an argument") +if(args.replace != None): + replaceItems =args.replace.split(",") + oldSymbols = [] + newSymbols = [] + for replaceItem in replaceItems: + replace = replaceItem.lstrip().split(" ") + if(len(replace) == 2): + oldSymbols.append(replace[0]) + newSymbols.append(replace[1]) +if(args.type != None): + types = args.type.split(",") + counter = 0 + for recordtype in types: + types[counter] = recordtype.lstrip().rstrip().capitalize() + if(types[counter] != "Alarm" and types[counter]!= "Log"): + invalidArguments = True + print("Invalid type argument") + counter +=1 +if(args.records != None): + if(args.fileName == None): + invalidArguments = True + print("Expected fileName as an argument") +if(args.product != None): + products = args.product.split(",") + counter = 0 + for product in products: + products[counter] = product.lstrip().rstrip().lower() + if(products[counter] != "openstack" and products[counter] != "starlingx"): + if(products[counter] == "empty"): + products[counter] = None + else: + print("Invalid product argument") + invalidArguments= True + counter +=1 +if(args.sort != None): + if(args.sort.upper() == 'TRUE'): + sort = True + else: + sort = False +else: + sort = False + + + +if(invalidArguments == False): + fileName, fileExtension = os.path.splitext(args.layout) + layout = parseLayoutFile() + events = parseEventFile() + if(args.records != None): + recordsFile(layout,events,fileExtension, oldSymbols, newSymbols,sort) + else: + seriesFile(layout, events, types, fileExtension, oldSymbols, newSymbols, products,sort) \ No newline at end of file diff --git a/templates/alarms_template.rst b/templates/alarms_template.rst new file mode 100644 index 000000000..de26fd686 --- /dev/null +++ b/templates/alarms_template.rst @@ -0,0 +1,40 @@ +head_format=" + +.. _-series-alarm-messages-: + + + Series Alarm Messages + + +" + +record_format=" + +.. list-table:: + :widths: 6 25 + :header-rows: 0 + + * - **Alarm ID: ** + - + * - Entity Instance + - + * - Degrade Affecting Severity: + - + * - Severity: + - + * - Proposed Repair Action + - + * - Management Affecting Severity + - + +----- +" + +foot_format=" + +.. raw:: html + +   + +" + diff --git a/templates/events.sh b/templates/events.sh index e5b7956f5..0175b4885 100644 --- a/templates/events.sh +++ b/templates/events.sh @@ -1,5 +1,5 @@ # If set, will be prepended to output paths -common_target="doc/source/fault-mgmt/kubernetes/" +common_target="tmp/" # The repo to download from remote_repo="opendev.org/starlingx/fault/raw/branch" @@ -9,5 +9,5 @@ remote_repo="opendev.org/starlingx/fault/raw/branch" # value is path and name to be saved, relative to CWD and common_target if set remote_files=( - ["fm-doc/fm_doc/events.yaml"]="test-events.yaml" + ["fm-doc/fm_doc/events.yaml"]="events.yaml" ) diff --git a/templates/logs_template.rst b/templates/logs_template.rst new file mode 100644 index 000000000..340f0dddf --- /dev/null +++ b/templates/logs_template.rst @@ -0,0 +1,35 @@ +head_format=" +.. _-series-log-messages-: + + + Log Messages + + +" + +record_format=" + +.. list-table:: + :widths: 6 25 + :header-rows: 0 + + * - **Log ID: ** + - + * - Entity Instance ID + - + * - Severity + - + +----- +" + + +foot_format=" + +.. 
diff --git a/tox.ini b/tox.ini
index 5089cb33b..6b725bf23 100644
--- a/tox.ini
+++ b/tox.ini
@@ -16,10 +16,20 @@ deps =
 # -c{env:TOX_CONSTRAINTS_FILE:doc/upper-constraints.txt}
 -r{toxinidir}/doc/requirements.txt
 commands =
+  git clean -dfx doc/source/fault-mgmt/
+  ./get-remote-files.sh -c templates/events.sh -o file -f
+  python3 parser.py -l templates/alarms_template.rst -e tmp/events.yaml -s 100,200,300,400,500,750,800,900 -ts = -type Alarm -outputPath doc/source/fault-mgmt/kubernetes/ -sort Yes -product starlingx -replace "|,OR"
+  python3 parser.py -l templates/logs_template.rst -e tmp/events.yaml -s 100,200,300,400,500,750,800,900 -ts = -type Log -outputPath doc/source/fault-mgmt/kubernetes/ -sort Yes -product starlingx -replace "|,OR"
+  python3 parser.py -l templates/alarms_template.rst -e tmp/events.yaml -s 100,200,300,400,500,750,800,900 -ts = -type Alarm -outputPath doc/source/fault-mgmt/openstack/ -sort Yes -product openstack -replace "|,OR"
+  python3 parser.py -l templates/logs_template.rst -e tmp/events.yaml -s 100,200,300,400,500,750,800,900 -ts = -type Log -outputPath doc/source/fault-mgmt/openstack/ -sort Yes -product openstack -replace "|,OR"
   sphinx-build -a -E -W --keep-going -d doc/build/doctrees -t starlingx -t openstack -b html doc/source doc/build/html {posargs}
+  git clean -dfx doc/source/fault-mgmt/
   bash htmlChecks.sh doc/build/html
 whitelist_externals = bash
   htmlChecks.sh
+  get-remote-files.sh
+  git
+
 [testenv:api-ref]
 deps = {[testenv:docs]deps}
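End to end, a contributor should not need to run any of the above by hand:
assuming the docs environment referenced by this tox.ini, a plain `tox -e docs`
cleans out previously generated pages, fetches events.yaml, regenerates the
per-series alarm and log pages for both the starlingx and openstack flavors,
and then runs the Sphinx build, so the generated .rst files never need to be
committed.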