kakatur commented on code in PR #64745:
URL: https://github.com/apache/airflow/pull/64745#discussion_r3068393683
##########
providers/amazon/src/airflow/providers/amazon/aws/operators/batch.py:
##########
@@ -404,6 +416,52 @@ def monitor_job(self, context: Context):
**awslogs[0],
)
+ def monitor_job(self, context: Context):
+ """
+ Monitor an AWS Batch job.
+
+ This can raise an exception or an AirflowTaskTimeout if the task was
+ created with ``execution_timeout``.
+ """
+ if not self.job_id:
+ raise AirflowException("AWS Batch job - job_id was not found")
+
+ # Persist job definition and queue links
+ self._persist_links(context)
+
+ if self.awslogs_enabled:
+ if self.waiters:
+ self.waiters.wait_for_job(self.job_id, get_batch_log_fetcher=self._get_batch_log_fetcher)
+ else:
+ self.hook.wait_for_job(self.job_id, get_batch_log_fetcher=self._get_batch_log_fetcher)
+ else:
+ if self.waiters:
+ self.waiters.wait_for_job(self.job_id)
+ else:
+ self.hook.wait_for_job(self.job_id)
+
+ # After job completes, fetch and persist CloudWatch logs once
+ try:
+ awslogs = self.hook.get_job_all_awslogs_info(self.job_id)
+ if awslogs:
+ self.log.info(
+ "AWS Batch job (%s) CloudWatch Events details found. Links to logs:", self.job_id
+ )
+ link_builder = CloudWatchEventsLink()
+ for log in awslogs:
+ self.log.info(link_builder.format_link(**log))
+
+ # Persist the first log stream as the UI link
+ CloudWatchEventsLink.persist(
+ context=context,
+ operator=self,
+ region_name=self.hook.conn_region_name,
+ aws_partition=self.hook.conn_partition,
+ **awslogs[0],
+ )
+ except AirflowException as ae:
+ self.log.warning("Cannot determine where to find the AWS logs for this Batch job: %s", ae)
+
Review Comment:
Replaced duplicate CloudWatch logic in monitor_job() with call to
_persist_cloudwatch_link(context) (line 444).
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]