This repository was archived by the owner on Jan 15, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 530
/
Copy path: prepare_music_midi.py
183 lines (136 loc) · 7.71 KB
/
prepare_music_midi.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
import argparse
import os
import tarfile
from gluonnlp.base import get_data_home_dir
from gluonnlp.utils.misc import download, load_checksum_stats
import zipfile
# BibTeX citations for the datasets handled by this script: the Lakh MIDI
# Dataset (Raffel's thesis) and the MAESTRO dataset paper.
_CITATIONS = """
@phdthesis{raffel2016learning,
title={Learning-based methods for comparing sequences, with applications to audio-to-midi alignment and matching},
author={Raffel, Colin},
year={2016},
school={Columbia University}
}
@inproceedings{hawthorne2018enabling,
title={Enabling Factorized Piano Music Modeling and Generation with the {MAESTRO} Dataset},
author={Curtis Hawthorne and Andriy Stasyuk and Adam Roberts and Ian Simon and Cheng-Zhi Anna Huang and Sander Dieleman and Erich Elsen and Jesse Engel and Douglas Eck},
booktitle={International Conference on Learning Representations},
year={2019},
url={https://openreview.net/forum?id=r1lYRjC9F7},
}
"""
# Directory containing this script; used to resolve the checksum file below.
_CURR_DIR = os.path.realpath(os.path.dirname(os.path.realpath(__file__)))
# Default cache directory where compressed archives are downloaded.
_BASE_DATASET_PATH = os.path.join(get_data_home_dir(), 'music_midi_data')
# Checksum listing lives one level up from this script, under url_checksums/.
_URL_FILE_STATS_PATH = os.path.join(_CURR_DIR, '..', 'url_checksums', 'music_midi.txt')
# Mapping of download URL -> expected SHA1 hash, loaded from the listing file.
_URL_FILE_STATS = load_checksum_stats(_URL_FILE_STATS_PATH)
# Supported dataset name -> archive download URL. The lmd_*/clean_midi entries
# are .tar.gz archives; maestro_*/geocities are .zip archives.
_URLS = {
    'lmd_full': 'http://hog.ee.columbia.edu/craffel/lmd/lmd_full.tar.gz',
    'lmd_matched': 'http://hog.ee.columbia.edu/craffel/lmd/lmd_matched.tar.gz',
    'lmd_aligned': 'http://hog.ee.columbia.edu/craffel/lmd/lmd_aligned.tar.gz',
    'clean_midi': 'http://hog.ee.columbia.edu/craffel/lmd/clean_midi.tar.gz',
    'maestro_v1': 'https://storage.googleapis.com/magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0-midi.zip',
    'maestro_v2': 'https://storage.googleapis.com/magentadata/datasets/maestro/v2.0.0/maestro-v2.0.0-midi.zip',
    'geocities': 'https://archive.org/download/archiveteam-geocities-midi-collection-2009/2009.GeoCities.MIDI.ArchiveTeam.zip'
}
def get_parser():
    """Create the command-line argument parser for the MIDI dataset downloader.

    Returns
    -------
    parser : argparse.ArgumentParser
        Parser accepting --dataset (required), --save-dir, --overwrite,
        and --cache-path.
    """
    dataset_choices = ['lmd_full', 'lmd_matched', 'lmd_aligned', 'clean_midi',
                       'maestro_v1', 'maestro_v2', 'geocities']
    save_dir_help = ('The directory to save the dataset.'
                     ' By default, it will save to a folder with the same name as the '
                     'dataset')
    parser = argparse.ArgumentParser(description='Download the Music Midi Datasets.')
    parser.add_argument('--dataset', type=str, required=True,
                        choices=dataset_choices,
                        help='The dataset to download.')
    parser.add_argument('--save-dir', type=str, default=None, help=save_dir_help)
    parser.add_argument('--overwrite', action='store_true',
                        help='Whether to overwrite the directory.')
    parser.add_argument('--cache-path', type=str, default=_BASE_DATASET_PATH,
                        help='The temporary path to download the compressed dataset.')
    return parser
def _is_within_directory(directory, target):
    """Return True iff ``target`` resolves to a path inside ``directory``."""
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    prefix = os.path.commonprefix([abs_directory, abs_target])
    return prefix == abs_directory


def _safe_extract_tar(tar, path='.', members=None, *, numeric_owner=False):
    """Extract ``tar`` into ``path``, rejecting members that would escape it.

    Guards against the tarfile path-traversal issue (CVE-2007-4559): a
    malicious archive member like ``../../etc/passwd`` raises instead of
    being written outside ``path``.
    """
    for member in tar.getmembers():
        member_path = os.path.join(path, member.name)
        if not _is_within_directory(path, member_path):
            raise Exception("Attempted Path Traversal in Tar File")
    tar.extractall(path, members, numeric_owner=numeric_owner)


def main(args):
    """Download the selected dataset archive and extract it into the save dir.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed arguments from :func:`get_parser` (dataset, save_dir,
        overwrite, cache_path).

    Raises
    ------
    NotImplementedError
        If ``args.dataset`` is not a known dataset key.
    """
    # Download the data (verified against the recorded SHA1 checksum).
    url = _URLS[args.dataset]
    file_hash = _URL_FILE_STATS[url]
    target_download_location = os.path.join(args.cache_path, os.path.basename(url))
    download(url, target_download_location, sha1_hash=file_hash)
    if args.save_dir is None:
        save_dir = args.dataset
    else:
        save_dir = args.save_dir
    if not args.overwrite and os.path.exists(save_dir):
        print('{} found, skip! Turn on --overwrite to force overwrite'.format(save_dir))
        # BUG FIX: previously this branch fell through and extracted anyway,
        # overwriting the existing directory despite the "skip" message.
        return
    print('Extract the data from {} into {}'.format(target_download_location,
                                                    save_dir))
    # The lmd_*/clean_midi archives are tarballs; maestro_*/geocities are zips.
    # (The previous per-dataset branches were byte-identical copies.)
    if args.dataset in ('lmd_full', 'lmd_matched', 'lmd_aligned', 'clean_midi'):
        with tarfile.open(target_download_location) as f:
            _safe_extract_tar(f, save_dir)
    elif args.dataset in ('maestro_v1', 'maestro_v2', 'geocities'):
        with zipfile.ZipFile(target_download_location, 'r') as fobj:
            fobj.extractall(save_dir)
    else:
        raise NotImplementedError
def cli_main():
    """Script entry point: parse CLI arguments and run the download flow."""
    args = get_parser().parse_args()
    main(args)


if __name__ == '__main__':
    cli_main()