为什么80%的码农都做不了架构师?>>>
环境介绍:
1: 公司目前有5个项目 A B C D E 日后可能会有所增加.
2: 使用fastdfs存储这5个项目的文件,要求各个项目的文件分开存储,也就是每个项目的文件存储到一个固定的位置.
3: 三台机器ip地址分配如下
tracker角色 IP:192.168.1.219
storageA角色 IP: 192.168.1.215
storageB角色 IP:192.168.1.216
4: 这里主要记录下实现思路,具体的配置方法都很简单.
---------tracker这台机器 主要有两个服务 fdfs_tracker 和 nginx---------------
tracker.conf的配置文件很简单这里不贴出来了
nginx的配置文件如下: nginx负责把访问请求转发给后台的两台storage.
[root@trackerB ~]# cat /usr/local/nginx/conf/nginx.conf | grep -v "#" | grep -v "^$"
user www;
worker_processes 1;
error_log logs/error.log;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
server_names_hash_bucket_size 128;
client_header_buffer_size 32k;
large_client_header_buffers 4 32k;
client_max_body_size 300m;
sendfile on;
tcp_nopush on;
keepalive_timeout 120;
gzip on;
gzip_min_length 1k;
gzip_buffers 4 16k;
gzip_http_version 1.1;
gzip_comp_level 2;
gzip_types text/plain application/x-javascript text/css application/xml;
gzip_vary on;
gzip_disable "MSIE[1-6].";
proxy_redirect off;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_connect_timeout 90;
proxy_send_timeout 90;
proxy_read_timeout 90;
proxy_buffer_size 16k;
proxy_buffers 4 64k;
proxy_busy_buffers_size 128k;
proxy_temp_file_write_size 128k;
log_format access '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$upstream_addr" "$upstream_response_time" "$host"';
proxy_cache_path /var/cache/nginx/proxy_cache levels=1:2 keys_zone=http-cache:500m max_size=10g inactive=30d;
proxy_temp_path /var/cache/nginx/proxy_cache/tmp;
upstream group1 {
server 192.168.1.215:80;
server 192.168.1.216:80;
}
server {
listen 80;
server_name image.ty.com;
location ~* /group1/(M00|M01|M02|M03|M04|M05) { #这里我使用了6个存储路径,每个项目一个
proxy_next_upstream http_502 http_504 error timeout invalid_header;
proxy_cache http-cache;
proxy_cache_valid 200 304 12h;
proxy_cache_key $uri$is_args$args;
proxy_pass http://group1;
expires 3d;
}
access_log logs/image.access.log access;
location / {
root html;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
include /usr/local/nginx/conf/vhost/*.conf;
}
---------------storage角色的两台机器-------------------
这两台机器的配置完全一样. 都运行了fdfs_storage 和 nginx , 其中nginx添加了fastdfs模块
fdfs_storage的配置文件storage.conf内容如下:
[root@localhost ~]# cat /etc/fdfs/storage.conf | grep -v "^#" | grep -v "^$"
disabled=false
group_name=group1
bind_addr=
client_bind=true
port=23000
connect_timeout=30
network_timeout=60
heart_beat_interval=30
stat_report_interval=60
base_path=/data/
max_connections=256
buff_size = 256KB
accept_threads=1
work_threads=4
disk_rw_separated = true
disk_reader_threads = 1
disk_writer_threads = 1
sync_wait_msec=50
sync_interval=0
sync_start_time=00:00
sync_end_time=23:59
write_mark_file_freq=500
# 一共 store_path0-5 六个存储路径
store_path_count=6
store_path0=/data/data1
store_path1=/data/data2
store_path2=/data/data3
store_path3=/data/data4
store_path4=/data/data5
store_path5=/data/data6
subdir_count_per_path=256
tracker_server=192.168.1.219:22122
log_level=info
run_by_group=
run_by_user=
allow_hosts=*
file_distribute_path_mode=0
file_distribute_rotate_count=100
fsync_after_written_bytes=0
sync_log_buff_interval=10
sync_binlog_buff_interval=10
sync_stat_file_interval=300
thread_stack_size=512KB
upload_priority=10
if_alias_prefix=
check_file_duplicate=0
file_signature_method=hash
key_namespace=FastDFS
keep_alive=0
use_access_log = false
rotate_access_log = false
access_log_rotate_time=00:00
rotate_error_log = false
error_log_rotate_time=00:00
rotate_access_log_size = 0
rotate_error_log_size = 0
log_file_keep_days = 0
file_sync_skip_invalid_record=false
use_connection_pool = false
connection_pool_max_idle_time = 3600
http.domain_name=
http.server_port=8888
nginx配置文件如下
[root@localhost ~]# cat /usr/local/nginx/conf/nginx.conf | grep -v "^#" | grep -v "^$"
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log logs/access.log main;
sendfile on;
keepalive_timeout 65;
client_max_body_size 2m;
server {
listen 80;
server_name localhost;
#charset koi8-r;
access_log logs/host.access.log main;
location /group1/M00 { #根据请求路径配置对应的存储路径.
root /data/data1;
ngx_fastdfs_module; #使用nginxmodule处理请求
}
location /group1/M01 {
root /data/data2;
ngx_fastdfs_module;
}
location /group1/M02 {
root /data/data3;
ngx_fastdfs_module;
}
location /group1/M03 {
root /data/data4;
ngx_fastdfs_module;
}
location /group1/M04 {
root /data/data5;
ngx_fastdfs_module;
}
location /group1/M05 {
root /data/data6;
ngx_fastdfs_module;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
include /usr/local/nginx/conf/vhosts/*.conf;
}
nginx的fastdfs模块的配置文件如下:
[root@localhost ~]# cat /etc/fdfs/mod_fastdfs.conf | grep -v "^#" | grep -v "^$"
connect_timeout=2
network_timeout=30
base_path=/tmp
load_fdfs_parameters_from_tracker=true
storage_sync_file_max_delay = 86400
use_storage_id = false
storage_ids_filename = storage_ids.conf
tracker_server=192.168.1.219:22122
storage_server_port=23000
group_name=group1
url_have_group_name = true
store_path_count=6
store_path0=/data/data1
store_path1=/data/data2
store_path2=/data/data3
store_path3=/data/data4
store_path4=/data/data5
store_path5=/data/data6
log_level=info
log_filename=/var/log/mod_fastdfs.log
response_mode=proxy
if_alias_prefix=
flv_support = true
flv_extension = flv
group_count = 0
-----------------以上就是用到的所有配置文件----------------------------
想实现按照不同的项目存储到不同的路径,主要通过代码来实现 以java为例
首先: 我们可以通过tracker获取一个可以使用的storage的ip, -------java代码如下---------
/**
 * Query the tracker for an available storage server and return its IP.
 *
 * @param trackerClient FastDFS tracker client used to issue the query
 * @param trackerServer connected tracker server
 * @return the storage server's IP address, or {@code null} if either
 *         argument is {@code null}, no storage is available, or the
 *         tracker query fails with an I/O error
 */
private static String getStorageServerIp(TrackerClient trackerClient, TrackerServer trackerServer) {
    String storageIp = null;
    if (trackerClient != null && trackerServer != null) {
        try {
            StorageServer storageServer = trackerClient.getStoreStorage(trackerServer, STORAGE_SERVER_GROUP);
            // getStoreStorage returns null when no storage server is available;
            // guard against it to avoid an NPE on getSocket().
            if (storageServer != null) {
                storageIp = storageServer.getSocket().getInetAddress().getHostAddress();
            }
        } catch (IOException e) {
            // Log through the application logger instead of printStackTrace
            // so the failure is visible in the normal log stream.
            log.error("failed to get storage server from tracker", e);
        }
    }
    log.info("——获取组中可用的storage IP——" + storageIp);
    return storageIp;
}
其次:可以根据获取的ip把需要上传的文件指定到要上传到的路径里面
/**
 * Build a {@code StorageServer} bound to the given IP, using the class's
 * configured port and store-path index (controls which store_pathN the
 * upload lands in — this is how per-project path separation is achieved).
 *
 * @param storageIp IP address of the target storage server
 * @return a {@code StorageServer} instance, or {@code null} if
 *         {@code storageIp} is null/empty or construction fails with an
 *         I/O error
 */
private static StorageServer getStorageServer(String storageIp) {
    StorageServer storageServer = null;
    // isEmpty() is the idiomatic form of !("").equals(...); the null check
    // before it keeps the original short-circuit behavior.
    if (storageIp != null && !storageIp.isEmpty()) {
        try {
            storageServer = new StorageServer(storageIp, Integer.parseInt(STORAGE_SERVER_PORT), Integer.parseInt(STORAGE_SERVER_M00));
        } catch (IOException e) {
            // Log through the application logger instead of printStackTrace.
            log.error("failed to create storage server for ip " + storageIp, e);
        }
    }
    log.info("——storage server生成");
    return storageServer;
}
这样就通过fastdfs支持多路径这个功能实现了不同项目分开存储的需求.
Linux下也有对应的命令:[root@localhost ~]# fdfs_upload_file -h
Usage: fdfs_upload_file <config_file> <local_filename> [storage_ip:port] [store_path_index]
[root@localhost ~]# fdfs_upload_file /etc/fdfs/storage.conf anni.jpg 192.168.1.215:23000 5
group1/M05/00/D4/wKgB11W4SQGASXcFAAFAB2RsPpA542.jpg