|
# Baseline RAM figures. The system-wide value is capped by the smaller of the
# two machine classes; the slave value is what the workers actually have.
system_ram_kb = min(slave_ram_kb, master_ram_kb)
system_ram_mb = system_ram_kb / 1024
slave_ram_mb = slave_ram_kb / 1024
# Leave some RAM for the OS, Hadoop daemons, and system caches.
# (threshold_mb, reserved_mb) pairs, checked from the largest machines down;
# the first threshold the slave exceeds determines how much RAM is held back.
_RAM_RESERVATIONS = [
    (100 * 1024, 15 * 1024),  # > 100 GB total: leave 15 GB
    (60 * 1024, 10 * 1024),   # > 60 GB total: leave 10 GB
    (40 * 1024, 6 * 1024),    # > 40 GB total: leave 6 GB
    (20 * 1024, 3 * 1024),    # > 20 GB total: leave 3 GB
    (10 * 1024, 2 * 1024),    # > 10 GB total: leave 2 GB
]
for _threshold_mb, _reserved_mb in _RAM_RESERVATIONS:
    if slave_ram_mb > _threshold_mb:
        slave_ram_mb = slave_ram_mb - _reserved_mb
        break
else:
    # Small machines: leave 1.3 GB, but never drop below 512 MB.
    slave_ram_mb = max(512, slave_ram_mb - 1300)
# For now Tachyon simply receives the same memory allocation as the
# Spark workers (slave_ram_mb).
tachyon_mb = slave_ram_mb
# Worker topology: no explicit instance count by default; each worker
# gets every CPU the slave machine reports.
worker_instances_str = ""
worker_cores = slave_cpus
|
65 | 66 | "hdfs_data_dirs": os.getenv("HDFS_DATA_DIRS"),
|
66 | 67 | "mapred_local_dirs": os.getenv("MAPRED_LOCAL_DIRS"),
|
67 | 68 | "spark_local_dirs": os.getenv("SPARK_LOCAL_DIRS"),
|
68 |
| - "default_spark_mem": "%dm" % spark_mb, |
| 69 | + "spark_worker_mem": "%dm" % slave_ram_mb, |
69 | 70 | "spark_worker_instances": worker_instances_str,
|
70 | 71 | "spark_worker_cores": "%d" % worker_cores,
|
71 | 72 | "spark_master_opts": os.getenv("SPARK_MASTER_OPTS", ""),
|
|
0 commit comments