# run_experiments.bat
# batch size
python gmf.py --batch_size 512 --lr 0.01 --n_emb 8 --epochs 30
python gmf.py --batch_size 1024 --lr 0.01 --n_emb 8 --epochs 30
python gmf.py --batch_size 1024 --lr 0.01 --n_emb 8 --epochs 30 --validate_every 2

# learning rates
python gmf.py --batch_size 1024 --lr 0.001 --n_emb 8 --epochs 30 --validate_every 2
python gmf.py --batch_size 1024 --lr 0.005 --n_emb 8 --epochs 30 --validate_every 2
python gmf.py --batch_size 1024 --lr 0.01 --n_emb 8 --lr_scheduler --epochs 30 --validate_every 2

# Embeddings
python gmf.py --batch_size 1024 --lr 0.01 --n_emb 16 --epochs 30 --validate_every 2
python gmf.py --batch_size 1024 --lr 0.01 --n_emb 32 --epochs 30 --validate_every 2
python gmf.py --batch_size 1024 --lr 0.01 --n_emb 64 --epochs 30 --validate_every 2
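
# Equivalent loop form of the three embedding-size runs above (a bash sketch,
# commented out so the sweeps are not run twice):
# for n_emb in 16 32 64; do
#     python gmf.py --batch_size 1024 --lr 0.01 --n_emb "$n_emb" --epochs 30 --validate_every 2
# done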

# batch size
python mlp.py --batch_size 512 --lr 0.01 --layers "[32, 16, 8]" --epochs 30 --validate_every 2
python mlp.py --batch_size 1024 --lr 0.01 --layers "[32, 16, 8]" --epochs 30 --validate_every 2

# learning rates
python mlp.py --batch_size 1024 --lr 0.001 --layers "[32, 16, 8]" --epochs 30 --validate_every 2
python mlp.py --batch_size 1024 --lr 0.005 --layers "[32, 16, 8]" --epochs 30 --validate_every 2
python mlp.py --batch_size 1024 --lr 0.01 --layers "[32, 16, 8]" --epochs 30 --lr_scheduler --validate_every 2

# Embeddings
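# (mlp.py takes no --n_emb flag here; judging by the checkpoint names used in
# the neumf runs below, e.g. "..._n_emb_64_ll_32_..." for layers "[128, 64, 32]",
# the embedding size appears to be half the first layer width: n_emb = layers[0] / 2.)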
python mlp.py --batch_size 1024 --lr 0.01 --layers "[64, 32, 16]" --epochs 30 --validate_every 2
python mlp.py --batch_size 1024 --lr 0.01 --layers "[128, 64, 32]" --epochs 30 --validate_every 2

# higher lr and lr_scheduler
python mlp.py --batch_size 1024 --lr 0.03 --layers "[64, 32, 16]" --epochs 30 --validate_every 2
python mlp.py --batch_size 1024 --lr 0.03 --layers "[128, 64, 32]" --epochs 30 --validate_every 2
python mlp.py --batch_size 1024 --lr 0.03 --layers "[64, 32, 16]" --epochs 30 --lr_scheduler --validate_every 2
python mlp.py --batch_size 1024 --lr 0.03 --layers "[128, 64, 32]" --epochs 30 --lr_scheduler --validate_every 2

# neumf
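# The pretrained checkpoint names below appear to encode the settings of the
# GMF/MLP runs above (inferred from the filenames, not documented here):
# bs = batch size, reg = regularization, lr_001 = lr 0.01, n_emb = embedding
# size, ll = last layer width, dp_wodp = without dropout, lrnr = learner,
# lrs_wlrs / lrs_wolrs = with / without lr scheduler.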
python neumf.py --batch_size 1024 --lr 0.01 --n_emb 8 --lr_scheduler --layers "[32, 16, 8]" --dropouts "[0.,0.]" \
--mf_pretrain "GMF_bs_512_lr_001_n_emb_8_lrnr_adam_lrs_wolrs.pt" \
--mlp_pretrain "MLP_bs_512_reg_00_lr_001_n_emb_16_ll_8_dp_wodp_lrnr_adam_lrs_wolrs.pt" \
--epochs 1 --learner "SGD"
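
# The next neumf command is repeated three times with identical settings
# (presumably to gauge run-to-run variance), then run once more without
# --learner "SGD", i.e. with the script's default optimizer.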

python neumf.py --batch_size 1024 --lr 0.01 --n_emb 8 --lr_scheduler --layers "[128, 64, 32]" --dropouts "[0.,0.]" \
--mf_pretrain "GMF_bs_1024_lr_001_n_emb_8_lrnr_adam_lrs_wolrs.pt" \
--mlp_pretrain "MLP_bs_1024_reg_00_lr_003_n_emb_64_ll_32_dp_wodp_lrnr_adam_lrs_wlrs.pt" \
--epochs 20 --learner "SGD" --validate_every 2

python neumf.py --batch_size 1024 --lr 0.01 --n_emb 8 --lr_scheduler --layers "[128, 64, 32]" --dropouts "[0.,0.]" \
--mf_pretrain "GMF_bs_1024_lr_001_n_emb_8_lrnr_adam_lrs_wolrs.pt" \
--mlp_pretrain "MLP_bs_1024_reg_00_lr_003_n_emb_64_ll_32_dp_wodp_lrnr_adam_lrs_wlrs.pt" \
--epochs 20 --learner "SGD" --validate_every 2

python neumf.py --batch_size 1024 --lr 0.01 --n_emb 8 --lr_scheduler --layers "[128, 64, 32]" --dropouts "[0.,0.]" \
--mf_pretrain "GMF_bs_1024_lr_001_n_emb_8_lrnr_adam_lrs_wolrs.pt" \
--mlp_pretrain "MLP_bs_1024_reg_00_lr_003_n_emb_64_ll_32_dp_wodp_lrnr_adam_lrs_wlrs.pt" \
--epochs 20 --learner "SGD" --validate_every 2

python neumf.py --batch_size 1024 --lr 0.01 --n_emb 8 --lr_scheduler --layers "[128, 64, 32]" --dropouts "[0.,0.]" \
--mf_pretrain "GMF_bs_1024_lr_001_n_emb_8_lrnr_adam_lrs_wolrs.pt" \
--mlp_pretrain "MLP_bs_1024_reg_00_lr_003_n_emb_64_ll_32_dp_wodp_lrnr_adam_lrs_wlrs.pt" \
--epochs 20 --validate_every 2

# I repeated this experiment 3 times: with momentum, without momentum, and a
# 3rd time with MSE, but did not save that last run
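# --freeze 1 presumably keeps the pretrained GMF and MLP weights frozen while
# the rest of NeuMF trains (inferred from the flag name; see neumf.py for the
# exact behavior).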
python neumf.py --batch_size 1024 --lr 0.001 --n_emb 8 --layers "[128, 64, 32]" --dropouts "[0.,0.]" \
--mf_pretrain "GMF_bs_1024_lr_001_n_emb_8_lrnr_adam_lrs_wolrs.pt" \
--mlp_pretrain "MLP_bs_1024_reg_00_lr_003_n_emb_64_ll_32_dp_wodp_lrnr_adam_lrs_wlrs.pt" \
--freeze 1 --epochs 4 --learner "SGD"

python neumf.py --batch_size 1024 --lr 0.001 --n_emb 8 --layers "[128, 64, 32]" --dropouts "[0.,0.]" \
--mf_pretrain "GMF_bs_1024_lr_001_n_emb_8_lrnr_adam_lrs_wolrs.pt" \
--mlp_pretrain "MLP_bs_1024_reg_00_lr_003_n_emb_64_ll_32_dp_wodp_lrnr_adam_lrs_wlrs.pt" \
--freeze 1 --epochs 4 --learner "SGD"

python neumf.py --batch_size 1024 --lr 0.001 --n_emb 8 --layers "[128, 64, 32]" --dropouts "[0.,0.]" \
--mf_pretrain "GMF_bs_1024_lr_001_n_emb_8_lrnr_adam_lrs_wolrs.pt" \
--mlp_pretrain "MLP_bs_1024_reg_00_lr_003_n_emb_64_ll_32_dp_wodp_lrnr_adam_lrs_wlrs.pt" \
--freeze 1 --epochs 4 --learner "SGD"

python neumf.py --batch_size 1024 --lr 0.001 --n_emb 8 --layers "[128, 64, 32]" --dropouts "[0.,0.]" \
--mf_pretrain "GMF_bs_1024_lr_001_n_emb_8_lrnr_adam_lrs_wolrs.pt" \
--mlp_pretrain "MLP_bs_1024_reg_00_lr_003_n_emb_64_ll_32_dp_wodp_lrnr_adam_lrs_wlrs.pt" \
--freeze 1 --epochs 4