I am using Dremio 24.3 CE with a Ceph 17.2.7 cluster (RGW S3 gateway) for distributed storage. When the cluster starts, the error below is reported and startup exits with an error. I am sure the S3 configuration itself is correct.
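For context, the distributed-storage configuration follows the usual Dremio pattern for an S3-compatible store. The sketch below uses placeholder bucket name, endpoint, and credentials rather than my exact values; the property names are the ones Dremio documents for S3-compatible storage, but please point out if any of them are wrong for Ceph RGW.

# dremio.conf (placeholder bucket name)
paths.dist: "dremioS3:///dremio-dist"

# core-site.xml (placeholder endpoint and credentials)
<property><name>fs.dremioS3.impl</name><value>com.dremio.plugins.s3.store.S3FileSystem</value></property>
<property><name>fs.s3a.access.key</name><value>ACCESS_KEY</value></property>
<property><name>fs.s3a.secret.key</name><value>SECRET_KEY</value></property>
<property><name>fs.s3a.aws.credentials.provider</name><value>org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider</value></property>
<property><name>fs.s3a.endpoint</name><value>rgw.example.local:7480</value></property>
<property><name>fs.s3a.path.style.access</name><value>true</value></property>
<property><name>dremio.s3.compat</name><value>true</value></property>
<property><name>fs.s3a.connection.ssl.enabled</name><value>false</value></property>

With this in place, the coordinator fails while creating the dist/uploads directory, as shown in the stack trace below.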
org.apache.hadoop.fs.s3a.AWSBadRequestException: PUT 0-byte object on dist/uploads: com.amazonaws.services.s3.model.AmazonS3Exception: null (Service: Amazon S3; Status Code: 400; Error Code: XAmzContentSHA256Mismatch; Request ID: tx0000068258129cbf14590-00658a7371-76ebb-xibahe; S3 Extended Request ID: 76ebb-xibahe-sjzx; Proxy: null), S3 Extended Request ID: 76ebb-xibahe-sjzx:XAmzContentSHA256Mismatch: null (Service: Amazon S3; Status Code: 400; Error Code: XAmzContentSHA256Mismatch; Request ID: tx0000068258129cbf14590-00658a7371-76ebb-xibahe; S3 Extended Request ID: 76ebb-xibahe-sjzx; Proxy: null)
at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:249)
at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:119)
at org.apache.hadoop.fs.s3a.Invoker.lambda$retry$4(Invoker.java:322)
at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:414)
at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:318)
at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:293)
at org.apache.hadoop.fs.s3a.S3AFileSystem.createEmptyObject(S3AFileSystem.java:4536)
at org.apache.hadoop.fs.s3a.S3AFileSystem.access$1900(S3AFileSystem.java:260)
at org.apache.hadoop.fs.s3a.S3AFileSystem$MkdirOperationCallbacksImpl.createFakeDirectory(S3AFileSystem.java:3465)
at org.apache.hadoop.fs.s3a.impl.MkdirOperation.execute(MkdirOperation.java:121)
at org.apache.hadoop.fs.s3a.impl.MkdirOperation.execute(MkdirOperation.java:45)
at org.apache.hadoop.fs.s3a.impl.ExecutingStoreOperation.apply(ExecutingStoreOperation.java:76)
at org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.lambda$trackDurationOfOperation$5(IOStatisticsBinding.java:499)
at org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDuration(IOStatisticsBinding.java:444)
at org.apache.hadoop.fs.s3a.S3AFileSystem.trackDurationAndSpan(S3AFileSystem.java:2341)
at org.apache.hadoop.fs.s3a.S3AFileSystem.trackDurationAndSpan(S3AFileSystem.java:2360)
at org.apache.hadoop.fs.s3a.S3AFileSystem.mkdirs(S3AFileSystem.java:3432)
at com.dremio.plugins.util.ContainerFileSystem.mkdirs(ContainerFileSystem.java:476)
at com.dremio.exec.hadoop.HadoopFileSystem.mkdirs(HadoopFileSystem.java:291)
at com.dremio.io.file.FilterFileSystem.mkdirs(FilterFileSystem.java:92)
at com.dremio.exec.store.dfs.LoggedFileSystem.mkdirs(LoggedFileSystem.java:127)
at com.dremio.dac.homefiles.HomeFileSystemStoragePlugin.start(HomeFileSystemStoragePlugin.java:106)
at com.dremio.exec.catalog.ManagedStoragePlugin.lambda$newStartSupplier$2(ManagedStoragePlugin.java:623)
at com.dremio.exec.catalog.ManagedStoragePlugin.lambda$nameSupplier$4(ManagedStoragePlugin.java:694)
at com.dremio.exec.catalog.ManagedStoragePlugin.lambda$refreshState$8(ManagedStoragePlugin.java:1080)
at java.util.concurrent.CompletableFuture$AsyncSupply.run(CompletableFuture.java:1604)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:750)