goosefs.master.embedded.journal.addresses=<master1>:9202,<master2>:9202,<master3>:9202
goosefs.master.metastore=ROCKS
# Use when it's uncertain whether rocksdb is fixed
goosefs.master.metastore.block=HEAP
# Depends on the memory size
goosefs.master.metastore.inode.cache.max.size=10000000
# rocksdb data storage place
goosefs.master.metastore.dir=/meta-1/metastore
# Mount path for the root directory must be placed in a secure directory to prevent accidental deletion
goosefs.master.mount.table.root.ufs=/meta-1/underFSStorage
# raft log storage place
goosefs.master.journal.folder=/meta-1/journal
# Timeout period for triggering master switchover should not be too low (jvm gc can cause switchover oscillation) or too large (will impact recovery availability time)
goosefs.master.embedded.journal.election.timeout=20s
# For large data volume, strongly recommend disabling
goosefs.master.startup.block.integrity.check.enabled=false
# The timing to trigger a checkpoint should not be too small (frequent checkpoints will prevent participation in leader election during the checkpoint period) or too large (affecting the service restart duration).
# It can be estimated based on the checkpoint loading duration.
goosefs.master.journal.checkpoint.period.entries=20000000
# acl authentication switch, set based on scenario
goosefs.security.authorization.permission.enabled=false
# Recommend enabling, otherwise hostname will be used, and hostnames may be identical.
goosefs.network.ip.address.used=true
# Worker properties
goosefs.worker.tieredstore.levels=1
goosefs.worker.tieredstore.level0.alias=HDD
goosefs.worker.tieredstore.level0.dirs.quota=7TB,7TB
goosefs.worker.tieredstore.level0.dirs.path=/data-1,/data-2
# worker restart timeout period, increase as much as possible for large quantities.
goosefs.worker.registry.get.timeout.ms=3600s
# read data response timeout, default 1h
goosefs.user.streaming.data.timeout=60s
# Write policy, LocalFirstPolicy is selected by default, possibly causing data imbalance
goosefs.user.block.write.location.policy.class=com.qcloud.cos.goosefs.client.block.policy.RoundRobinPolicy
# Impacts distributedLoad speed. Without considering online read impact, set it to cpu count * 2.
goosefs.job.worker.threadpool.size=50
goosefs.master.embedded.journal.addresses=<master1>:9202,<master2>:9202,<master3>:9202
goosefs.master.metastore=ROCKS
# Use when it's uncertain whether rocksdb is fixed
goosefs.master.metastore.block=HEAP
# Based on memory size
goosefs.master.metastore.inode.cache.max.size=10000000
# rocksdb data storage place
goosefs.master.metastore.dir=/meta-1/metastore
# Mount path for the root directory must be placed in a secure directory to prevent accidental deletion
goosefs.master.mount.table.root.ufs=/meta-1/underFSStorage
# raft log storage place
goosefs.master.journal.folder=/meta-1/journal
# Timeout period for triggering master switchover should not be too low (jvm gc can cause switchover oscillation) or too large (will impact recovery availability time)
goosefs.master.embedded.journal.election.timeout=20s
# For large data volume, strongly recommend disabling
goosefs.master.startup.block.integrity.check.enabled=false
# The timing to trigger a checkpoint should not be too low (frequent checkpoints will prevent participation in leader election during the checkpoint period) or too large (impacting service restart duration).
# It can be estimated based on checkpoint loading duration.
goosefs.master.journal.checkpoint.period.entries=20000000
# acl authentication switch, based on scenario
goosefs.security.authorization.permission.enabled=false
# Recommend enabling, otherwise hostname will be used, and hostnames may be identical.
goosefs.network.ip.address.used=true
# Worker properties
goosefs.worker.tieredstore.levels=1
goosefs.worker.tieredstore.level0.alias=HDD
goosefs.worker.tieredstore.level0.dirs.quota=7TB,7TB
goosefs.worker.tieredstore.level0.dirs.path=/data-1,/data-2
# worker restart timeout period, increase as much as possible for large quantities.
goosefs.worker.registry.get.timeout.ms=3600s
# read data response timeout, default 1h
goosefs.user.streaming.data.timeout=60s
# Write policy, LocalFirstPolicy is selected by default, possibly causing data imbalance
goosefs.user.block.write.location.policy.class=com.qcloud.cos.goosefs.client.block.policy.RoundRobinPolicy
# For random read cases, it is advisable to reduce the value (default 1MB) to prevent read bloat
goosefs.user.streaming.reader.chunk.size.bytes=256KB
goosefs.user.local.reader.chunk.size.bytes=256KB
# Time to wait for worker read stream to close. For large number of small file reads or random read cases, it is advisable to reduce the value (default 5s) to avoid performance decrease caused by long tail.
goosefs.user.streaming.reader.close.timeout=100ms
Feedback