#!/bin/bash
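# Mirror the Euclid Q1 data products from IRSA: one recursive wget per
# product directory, run in parallel, each logging to its own file.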
# Array of IRSA download URLs for the Euclid Q1 data products
urls=(
"https://irsa.ipac.caltech.edu/ibe/data/euclid/q1/MER/"
"https://irsa.ipac.caltech.edu/ibe/data/euclid/q1/MER_SEG/"
"https://irsa.ipac.caltech.edu/ibe/data/euclid/q1/NIR/"
"https://irsa.ipac.caltech.edu/ibe/data/euclid/q1/RAW/"
"https://irsa.ipac.caltech.edu/ibe/data/euclid/q1/SIR/"
"https://irsa.ipac.caltech.edu/ibe/data/euclid/q1/VIS/"
"https://irsa.ipac.caltech.edu/ibe/data/euclid/q1/VMPZ/"
"https://irsa.ipac.caltech.edu/ibe/data/euclid/q1/catalogs/"
)
# Create the log directory
log_dir="download_logs"
mkdir -p "$log_dir"
# Loop over the URLs and start all downloads concurrently
for url in "${urls[@]}"; do
    # Use the last path component of the URL as the log file name
    # (basename strips the trailing slash, e.g. ".../MER/" -> "MER")
    log_name=$(basename "$url")
    log_file="$log_dir/${log_name}.log"
    echo "Starting download of $url, logging to $log_file..."
    wget -c -r -np -nH --cut-dirs=4 -R "index.html*" "$url" -o "$log_file" &
done
# Wait for all background download jobs to finish
wait
echo "All downloads completed."