This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch dev-1.0.1
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 926138c6a031677e44211e0f707ad404a6ff06d8
Author: Mingyu Chen <morningman....@gmail.com>
AuthorDate: Wed Jun 29 12:02:27 2022 +0800

    [log] add more error info for hdfs reader writer (#10475)
---
 be/src/exec/hdfs_file_reader.cpp | 19 ++++++++-----------
 be/src/exec/hdfs_writer.cpp      | 29 +++++++++++++++--------------
 2 files changed, 23 insertions(+), 25 deletions(-)

diff --git a/be/src/exec/hdfs_file_reader.cpp b/be/src/exec/hdfs_file_reader.cpp
index b042018e55..c71dc4a39a 100644
--- a/be/src/exec/hdfs_file_reader.cpp
+++ b/be/src/exec/hdfs_file_reader.cpp
@@ -62,21 +62,18 @@ Status HdfsFileReader::open() {
     if (_hdfs_file == nullptr) {
         std::stringstream ss;
         ss << "open file failed. "
-           << "(BE: " << BackendOptions::get_localhost() << ")" << _namenode << _path
-           << ", err: " << strerror(errno);
-        ;
+           << "(BE: " << BackendOptions::get_localhost() << ")"
+           << " namenode:" << _namenode << ", path:" << _path << ", err: " << hdfsGetLastError();
         return Status::InternalError(ss.str());
     }
-    LOG(INFO) << "open file. " << _namenode << _path;
+    VLOG_NOTICE << "open file, namenode:" << _namenode << ", path:" << _path;
     return seek(_current_offset);
 }
 
 void HdfsFileReader::close() {
     if (!closed()) {
         if (_hdfs_file != nullptr && _hdfs_fs != nullptr) {
-            std::stringstream ss;
-            ss << "close hdfs file: " << _namenode << _path;
-            LOG(INFO) << ss.str();
+            VLOG_NOTICE << "close hdfs file: " << _namenode << _path;
             //If the hdfs file was valid, the memory associated with it will
             // be freed at the end of this call, even if there was an I/O error
             hdfsCloseFile(_hdfs_fs, _hdfs_file);
@@ -125,7 +122,7 @@ Status HdfsFileReader::readat(int64_t position, int64_t nbytes, int64_t* bytes_r
             std::stringstream ss;
             ss << "hdfsSeek failed. "
                << "(BE: " << BackendOptions::get_localhost() << ")" << _namenode << _path
-               << ", err: " << strerror(errno);
+               << ", err: " << hdfsGetLastError();
             ;
             return Status::InternalError(ss.str());
         }
@@ -136,7 +133,7 @@ Status HdfsFileReader::readat(int64_t position, int64_t nbytes, int64_t* bytes_r
         std::stringstream ss;
         ss << "Read hdfs file failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")" << _namenode << _path
-           << ", err: " << strerror(errno);
+           << ", err: " << hdfsGetLastError();
         ;
         return Status::InternalError(ss.str());
     }
@@ -156,7 +153,7 @@ int64_t HdfsFileReader::size() {
         hdfsFileInfo* file_info = hdfsGetPathInfo(_hdfs_fs, _path.c_str());
         if (file_info == nullptr) {
             LOG(WARNING) << "get path info failed: " << _namenode << _path
-                         << ", err: " << strerror(errno);
+                         << ", err: " << hdfsGetLastError();
             ;
             close();
             return -1;
@@ -176,7 +173,7 @@ Status HdfsFileReader::seek(int64_t position) {
         std::stringstream ss;
         ss << "Seek to offset failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << " offset=" << position << ", err: " << strerror(errno);
+           << " offset=" << position << ", err: " << hdfsGetLastError();
         return Status::InternalError(ss.str());
     }
     return Status::OK();
diff --git a/be/src/exec/hdfs_writer.cpp b/be/src/exec/hdfs_writer.cpp
index b45fbe4449..38bb98704e 100644
--- a/be/src/exec/hdfs_writer.cpp
+++ b/be/src/exec/hdfs_writer.cpp
@@ -64,13 +64,14 @@ Status HDFSWriter::open() {
     std::string hdfs_dir = hdfs_path.parent_path().string();
     exists = hdfsExists(_hdfs_fs, hdfs_dir.c_str());
     if (exists != 0) {
-        LOG(INFO) << "hdfs dir doesn't exist, create it: " << hdfs_dir;
+        VLOG_NOTICE << "hdfs dir doesn't exist, create it: " << hdfs_dir;
         int ret = hdfsCreateDirectory(_hdfs_fs, hdfs_dir.c_str());
         if (ret != 0) {
             std::stringstream ss;
-            ss << "create dir failed. " << "(BE: " << BackendOptions::get_localhost() << ")"
-               << " namenode: " << _namenode << " path: " << hdfs_dir
-               << ", err: " << strerror(errno);
+            ss << "create dir failed. "
+               << "(BE: " << BackendOptions::get_localhost() << ")"
+               << " namenode: " << _namenode << " path: " << hdfs_dir
+               << ", err: " << hdfsGetLastError();
             LOG(WARNING) << ss.str();
             return Status::InternalError(ss.str());
         }
@@ -79,13 +80,13 @@ Status HDFSWriter::open() {
     _hdfs_file = hdfsOpenFile(_hdfs_fs, _path.c_str(), O_WRONLY, 0, 0, 0);
    if (_hdfs_file == nullptr) {
         std::stringstream ss;
-        ss << "open file failed. " << "(BE: " << BackendOptions::get_localhost() << ")"
-           << " namenode:" << _namenode << " path:" << _path
-           << ", err: " << strerror(errno);
+        ss << "open file failed. "
+           << "(BE: " << BackendOptions::get_localhost() << ")"
+           << " namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError();
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
-    LOG(INFO) << "open file. namenode:" << _namenode << " path:" << _path;
+    VLOG_NOTICE << "open file. namenode:" << _namenode << ", path:" << _path;
     return Status::OK();
 }
 
@@ -97,9 +98,9 @@ Status HDFSWriter::write(const uint8_t* buf, size_t buf_len, size_t* written_len
     int32_t result = hdfsWrite(_hdfs_fs, _hdfs_file, buf, buf_len);
     if (result < 0) {
         std::stringstream ss;
-        ss << "write file failed. " << "(BE: " << BackendOptions::get_localhost() << ")"
-           << "namenode:" << _namenode << " path:" << _path
-           << ", err: " << strerror(errno);
+        ss << "write file failed. "
+           << "(BE: " << BackendOptions::get_localhost() << ")"
+           << "namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError();
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
@@ -124,9 +125,9 @@ Status HDFSWriter::close() {
     int result = hdfsFlush(_hdfs_fs, _hdfs_file);
     if (result == -1) {
         std::stringstream ss;
-        ss << "failed to flush hdfs file. " << "(BE: " << BackendOptions::get_localhost() << ")"
-           << "namenode:" << _namenode << " path:" << _path
-           << ", err: " << strerror(errno);
+        ss << "failed to flush hdfs file. "
+           << "(BE: " << BackendOptions::get_localhost() << ")"
+           << "namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError();
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
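For readers following the change: the patch consistently swaps strerror(errno) for hdfsGetLastError() in the failure messages, since the HDFS client keeps its own last-error string and errno may not reflect the actual failure. The sketch below is only an illustration of that reporting pattern, not Doris code; it assumes a libhdfs3-style client that exposes const char* hdfsGetLastError() (as used in the diff), and the namenode address and file path are hypothetical placeholders.

// error_report_sketch.cpp -- illustrative only, not part of the patch above.
// Shows the pattern the commit adopts: when a libhdfs call fails, put
// hdfsGetLastError() into the message instead of strerror(errno).
#include <fcntl.h>      // O_RDONLY
#include <hdfs/hdfs.h>  // or <hdfs.h>, depending on how the client is packaged

#include <iostream>
#include <sstream>
#include <string>

// Try to open a file and, on failure, build a message in the same shape as the
// patched code: what failed, on which namenode/path, and the client's error.
static std::string open_or_error(hdfsFS fs, const std::string& namenode,
                                 const std::string& path) {
    hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_RDONLY, 0, 0, 0);
    if (file == nullptr) {
        std::stringstream ss;
        ss << "open file failed."
           << " namenode:" << namenode << ", path:" << path
           << ", err: " << hdfsGetLastError();  // client-side detail, not errno
        return ss.str();
    }
    hdfsCloseFile(fs, file);
    return "ok";
}

int main() {
    // Placeholder namenode host/port; replace with a reachable cluster.
    hdfsFS fs = hdfsConnect("127.0.0.1", 8020);
    if (fs == nullptr) {
        std::cerr << "connect failed, err: " << hdfsGetLastError() << std::endl;
        return 1;
    }
    std::cout << open_or_error(fs, "hdfs://127.0.0.1:8020", "/tmp/does_not_exist")
              << std::endl;
    hdfsDisconnect(fs);
    return 0;
}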
" << "(BE: " << BackendOptions::get_localhost() << ")" - << " namenode: " << _namenode << " path: " << hdfs_dir - << ", err: " << strerror(errno); + ss << "create dir failed. " + << "(BE: " << BackendOptions::get_localhost() << ")" + << " namenode: " << _namenode << " path: " << hdfs_dir + << ", err: " << hdfsGetLastError(); LOG(WARNING) << ss.str(); return Status::InternalError(ss.str()); } @@ -79,13 +80,13 @@ Status HDFSWriter::open() { _hdfs_file = hdfsOpenFile(_hdfs_fs, _path.c_str(), O_WRONLY, 0, 0, 0); if (_hdfs_file == nullptr) { std::stringstream ss; - ss << "open file failed. " << "(BE: " << BackendOptions::get_localhost() << ")" - << " namenode:" << _namenode << " path:" << _path - << ", err: " << strerror(errno); + ss << "open file failed. " + << "(BE: " << BackendOptions::get_localhost() << ")" + << " namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError(); LOG(WARNING) << ss.str(); return Status::InternalError(ss.str()); } - LOG(INFO) << "open file. namenode:" << _namenode << " path:" << _path; + VLOG_NOTICE << "open file. namenode:" << _namenode << ", path:" << _path; return Status::OK(); } @@ -97,9 +98,9 @@ Status HDFSWriter::write(const uint8_t* buf, size_t buf_len, size_t* written_len int32_t result = hdfsWrite(_hdfs_fs, _hdfs_file, buf, buf_len); if (result < 0) { std::stringstream ss; - ss << "write file failed. " << "(BE: " << BackendOptions::get_localhost() << ")" - << "namenode:" << _namenode << " path:" << _path - << ", err: " << strerror(errno); + ss << "write file failed. " + << "(BE: " << BackendOptions::get_localhost() << ")" + << "namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError(); LOG(WARNING) << ss.str(); return Status::InternalError(ss.str()); } @@ -124,9 +125,9 @@ Status HDFSWriter::close() { int result = hdfsFlush(_hdfs_fs, _hdfs_file); if (result == -1) { std::stringstream ss; - ss << "failed to flush hdfs file. " << "(BE: " << BackendOptions::get_localhost() << ")" - << "namenode:" << _namenode << " path:" << _path - << ", err: " << strerror(errno); + ss << "failed to flush hdfs file. " + << "(BE: " << BackendOptions::get_localhost() << ")" + << "namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError(); LOG(WARNING) << ss.str(); return Status::InternalError(ss.str()); } --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org For additional commands, e-mail: commits-h...@doris.apache.org