#!/bin/bash
#
# This script runs through the code in each of the python examples.
# The purpose is purely to serve as an integration test, not to actually train models in any meaningful way.
# For that reason, most of these set epochs = 1 and --dry-run.
#
# Optionally specify a comma-separated list of examples to run. Can be run as:
# * To run all examples:
#   ./run_python_examples.sh
# * To run a few specific examples:
#   ./run_python_examples.sh "dcgan,fast_neural_style"
#
# To test examples on a CUDA accelerator, run as:
#   USE_CUDA=True ./run_python_examples.sh
#
# This script requires uv to be installed. When executed, the script installs the prerequisites
# listed in `requirements.txt` for each example. If run within an activated virtual environment
# (uv venv, python -m venv, conda), this might reinstall some of the packages. To change the pip
# installation index or to pass additional pip install options, run as:
#   PIP_INSTALL_ARGS="--pre -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html" \
#     ./run_python_examples.sh
#
# To force the script to create a virtual environment for each example, run as:
#   VIRTUAL_ENV=".venv" ./run_python_examples.sh
# The script removes the environments it creates in a teardown step after each example finishes.
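#
# The options above can be combined. For instance (a hypothetical invocation, assuming only the
# variables and optional argument documented above):
#   USE_CUDA=True VIRTUAL_ENV=".venv" ./run_python_examples.sh "mnist,vae"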
BASE_DIR="$(pwd)/$(dirname "$0")"
source "$BASE_DIR/utils.sh"
USE_CUDA=${USE_CUDA:-False}
case $USE_CUDA in
  "True")
    echo "using cuda"
    CUDA=1
    CUDA_FLAG="--cuda"
    ;;
  "False")
    echo "not using cuda"
    CUDA=0
    CUDA_FLAG=""
    ;;
  "")
    echo "USE_CUDA must be set to True or False"
    exit 1
    ;;
esac
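
# Each function below smoke-tests one example with minimal settings (typically --epochs 1
# and/or --dry-run) rather than training anything meaningful.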
function dcgan() {
  uv run main.py --dataset fake $CUDA_FLAG --mps --dry-run || error "dcgan failed"
}

function fast_neural_style() {
  if [ ! -d "saved_models" ]; then
    echo "downloading saved models for fast neural style"
    uv run download_saved_models.py
  fi
  test -d "saved_models" || { error "saved models not found"; return; }
  echo "running fast neural style model"
  uv run neural_style/neural_style.py eval --content-image images/content-images/amber.jpg --model saved_models/candy.pth --output-image images/output-images/amber-candy.jpg --cuda $CUDA --mps || error "neural_style.py failed"
}
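
# imagenet: build a tiny stand-in dataset (a single class directory "n" under sample/train
# and sample/val, each holding one downloaded image) so main.py can run for one epoch.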
function imagenet() {
  if [[ ! -d "sample/val" || ! -d "sample/train" ]]; then
    mkdir -p sample/val/n
    mkdir -p sample/train/n
    curl -O "https://upload.wikimedia.org/wikipedia/commons/5/5a/Socks-clinton.jpg" || { error "couldn't download sample image for imagenet"; return; }
    mv Socks-clinton.jpg sample/train/n
    cp sample/train/n/* sample/val/n/
  fi
  uv run main.py --epochs 1 sample/ || error "imagenet example failed"
}

function language_translation() {
  uv run -m spacy download en || error "couldn't download en package from spacy"
  uv run -m spacy download de || error "couldn't download de package from spacy"
  uv run main.py -e 1 --enc_layers 1 --dec_layers 1 --backend cpu --logging_dir output/ --dry_run || error "language translation example failed"
}

function mnist() {
  uv run main.py --epochs 1 --dry-run || error "mnist example failed"
}

function mnist_forward_forward() {
  uv run main.py --epochs 1 --no_mps --no_cuda || error "mnist forward forward failed"
}

function mnist_hogwild() {
  uv run main.py --epochs 1 --dry-run $CUDA_FLAG || error "mnist hogwild failed"
}

function mnist_rnn() {
  uv run main.py --epochs 1 --dry-run || error "mnist rnn example failed"
}

function regression() {
  uv run main.py --epochs 1 $CUDA_FLAG || error "regression failed"
}

function siamese_network() {
  uv run main.py --epochs 1 --dry-run || error "siamese network example failed"
}

function reinforcement_learning() {
  uv run reinforce.py || error "reinforcement learning reinforce failed"
  uv run actor_critic.py || error "reinforcement learning actor_critic failed"
}

function snli() {
  echo "installing 'en' model if not installed"
  uv run -m spacy download en || { error "couldn't download 'en' model needed for snli"; return; }
  echo "training..."
  uv run train.py --epochs 1 --dev_every 1 --no-bidirectional --dry-run || error "couldn't train snli"
}

function fx() {
  # custom_tracer.py is disabled: it currently fails with
  # "UnboundLocalError: local variable 'tabulate' referenced before assignment"
  # uv run custom_tracer.py || error "fx custom tracer has failed"
  uv run invert.py || error "fx invert has failed"
  uv run module_tracer.py || error "fx module tracer has failed"
  uv run primitive_library.py || error "fx primitive library has failed"
  uv run profiling_tracer.py || error "fx profiling tracer has failed"
  uv run replace_op.py || error "fx replace op has failed"
  uv run subgraph_rewriter_basic_use.py || error "fx subgraph has failed"
  uv run wrap_output_dynamically.py || error "fx wrap output dynamically has failed"
}

function super_resolution() {
  uv run main.py --upscale_factor 3 --batchSize 4 --testBatchSize 100 --nEpochs 1 --lr 0.001 --mps || error "super resolution failed"
}

function time_sequence_prediction() {
  uv run generate_sine_wave.py || { error "generate sine wave failed"; return; }
  uv run train.py --steps 2 || error "time sequence prediction training failed"
}

function vae() {
  uv run main.py --epochs 1 || error "vae failed"
}

function vision_transformer() {
  uv run main.py --epochs 1 --dry-run || error "vision transformer example failed"
}

function word_language_model() {
  uv run main.py --epochs 1 --dry-run $CUDA_FLAG --mps || error "word_language_model failed"
}

function gcn() {
  uv run main.py --epochs 1 --dry-run || error "graph convolutional network failed"
}

function gat() {
  uv run main.py --epochs 1 --dry-run || error "graph attention network failed"
}
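
# Keep the stop() defined in utils.sh available as base_stop, then override stop() so the
# cleanup below also removes the artifacts each example leaves behind before delegating
# back to the original implementation.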
eval "base_$(declare -f stop)"
function stop() {
cd $BASE_DIR
rm -rf dcgan/fake_samples_epoch_000.png \
dcgan/netD_epoch_0.pth \
dcgan/netG_epoch_0.pth \
dcgan/real_samples.png \
fast_neural_style/saved_models.zip \
fast_neural_style/saved_models/ \
imagenet/checkpoint.pth.tar \
imagenet/lsun/ \
imagenet/model_best.pth.tar \
imagenet/sample/ \
language_translation/output/ \
snli/.data/ \
snli/.vector_cache/ \
snli/results/ \
super_resolution/dataset/ \
super_resolution/model_epoch_1.pth \
time_sequence_prediction/predict*.pdf \
time_sequence_prediction/traindata.pt \
word_language_model/model.pt \
gcn/cora/ \
gat/cora/ || error "couldn't clean up some files"
git checkout fast_neural_style/images/output-images/amber-candy.jpg || error "couldn't clean up fast neural style image"
base_stop "$1"
}
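
# run() comes from utils.sh; it is expected to change into the example's directory, install
# that example's requirements.txt, invoke the function named by its argument, and then call
# stop() for cleanup.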
function run_all() {
  # cpp moved to `run_cpp_examples.sh`
  run dcgan
  # distributed moved to `run_distributed_examples.sh`
  run fast_neural_style
  run imagenet
  # language_translation
  run mnist
  run mnist_forward_forward
  run mnist_hogwild
  run mnist_rnn
  run regression
  run reinforcement_learning
  run siamese_network
  run super_resolution
  run time_sequence_prediction
  run vae
  # vision_transformer - example broken, see https://github.com/pytorch/examples/issues/1184 and https://github.com/pytorch/examples/pull/1258 for more details
  run word_language_model
  run fx
  run gcn
  run gat
}
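
# EXAMPLES is expected to be populated by utils.sh from the optional comma-separated
# argument described in the header.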
# by default, run all examples
if [ "" == "$EXAMPLES" ]; then
  run_all
else
  for i in $(echo $EXAMPLES | sed "s/,/ /g")
  do
    echo "Starting $i"
    run $i
    echo "Finished $i, status $?"
  done
fi
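
# error() from utils.sh is assumed to accumulate failure messages in ERRORS; if any example
# reported a failure, list them and exit non-zero so the overall run is marked as failed.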
if [ "" == "$ERRORS" ]; then
echo "Completed successfully with status $?"
else
echo "Some python examples failed:"
printf "$ERRORS\n"
#Exit with error (0-255) in case of failure in one of the tests.
exit 1
fi