
Commit 1ce1910

Update .ipynb files to what scripts/format_ipynb.py produces
After updating nbformat, running scripts/format_ipynb.py changed the contents of all the IPython notebook files. Most frustratingly, one of the changes is that the indentation of the .ipynb files changed, making the diffs particularly noisy. Spot-inspecting files manually reveals a number of real changes, some worthwhile (e.g., adding missing spaces in argument lists) and some merely differences in formatting, such as where line breaks are introduced. The latter set of changes is puzzling because the style settings haven't changed. I tried but couldn't find a way to avoid these changes except by outright changing the style parameters, but that would make the TFQ .ipynb file formatting non-standard, which seems worse. So, for lack of a better solution, I'm checking in all the reformatted notebooks in the hope that future versions of yapf and nbformat don't keep introducing more .ipynb format changes.
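
For context, the reformatting pass described above amounts to reading each notebook with nbformat, running yapf over every code cell, and writing the notebook back out. The real scripts/format_ipynb.py is not shown in this commit, so the following is only a rough sketch under that assumption; the glob pattern and style_config value are illustrative, and handling of notebook magics and errors is omitted.

import glob

import nbformat
from yapf.yapflib.yapf_api import FormatCode

for path in sorted(glob.glob("docs/tutorials/*.ipynb")):
    nb = nbformat.read(path, as_version=nbformat.NO_CONVERT)
    for cell in nb.cells:
        if cell.cell_type != "code":
            continue
        # yapf decides where line breaks go in long argument lists; those
        # choices can shift between yapf versions even when the style
        # settings stay the same.
        formatted, _ = FormatCode(cell.source, style_config="google")
        cell.source = formatted.rstrip("\n")
    # nbformat controls how the notebook JSON is serialized (including its
    # indentation), so upgrading nbformat can rewrite every .ipynb on disk.
    nbformat.write(nb, path)

Pinning the nbformat and yapf versions the script runs with is the usual way to keep this kind of churn out of future diffs.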
1 parent 51531dd commit 1ce1910

9 files changed: +6,209 -6,060 lines

Diff for: docs/tutorials/barren_plateaus.ipynb (+525, -524)
Diff for: docs/tutorials/gradients.ipynb (+827, -826)
Diff for: docs/tutorials/hello_many_worlds.ipynb (+1,338, -1,334)
Diff for: docs/tutorials/mnist.ipynb (+1,137, -1,131)
Diff for: docs/tutorials/noise.ipynb (+834, -803)
Diff for: docs/tutorials/qcnn.ipynb (+1,211, -1,210)
Diff for: docs/tutorials/quantum_data.ipynb (+130, -105)
Diff for: docs/tutorials/quantum_reinforcement_learning.ipynb (+148, -91)

Large diffs are not rendered by default, so the eight diffs above are omitted.

Diff for: docs/tutorials/research_tools.ipynb (+59, -36)
@@ -86,22 +86,23 @@
 "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.3 tensorboard_plugin_profile==2.15.0"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": 0,
-"metadata": {
-"colab": {},
-"colab_type": "code",
-"id": "4Ql5PW-ACO0J"
-},
-"outputs": [],
-"source": [
-"# Update package resources to account for version changes.\n",
-"import importlib, pkg_resources\n",
-"importlib.reload(pkg_resources)"
-]
-},
-{
+{
+"cell_type": "code",
+"execution_count": 0,
+"metadata": {
+"colab": {},
+"colab_type": "code",
+"id": "4Ql5PW-ACO0J"
+},
+"outputs": [],
+"source": [
+"# Update package resources to account for version changes.\n",
+"import importlib, pkg_resources\n",
+"\n",
+"importlib.reload(pkg_resources)"
+]
+},
+{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
@@ -159,9 +160,11 @@
 " qubits, depth=2)\n",
 " return random_circuit\n",
 "\n",
+"\n",
 "def generate_data(circuit, n_samples):\n",
 " \"\"\"Draw n_samples samples from circuit into a tf.Tensor.\"\"\"\n",
-" return tf.squeeze(tfq.layers.Sample()(circuit, repetitions=n_samples).to_tensor())"
+" return tf.squeeze(tfq.layers.Sample()(circuit,\n",
+" repetitions=n_samples).to_tensor())"
 ]
 },
 {
@@ -270,16 +273,20 @@
 " \"\"\"Convert tensor of bitstrings to tensor of ints.\"\"\"\n",
 " sigs = tf.constant([1 << i for i in range(N_QUBITS)], dtype=tf.int32)\n",
 " rounded_bits = tf.clip_by_value(tf.math.round(\n",
-" tf.cast(bits, dtype=tf.dtypes.float32)), clip_value_min=0, clip_value_max=1)\n",
-" return tf.einsum('jk,k->j', tf.cast(rounded_bits, dtype=tf.dtypes.int32), sigs)\n",
+" tf.cast(bits, dtype=tf.dtypes.float32)),\n",
+" clip_value_min=0,\n",
+" clip_value_max=1)\n",
+" return tf.einsum('jk,k->j', tf.cast(rounded_bits, dtype=tf.dtypes.int32),\n",
+" sigs)\n",
+"\n",
 "\n",
 "@tf.function\n",
 "def xeb_fid(bits):\n",
 " \"\"\"Compute linear XEB fidelity of bitstrings.\"\"\"\n",
 " final_probs = tf.squeeze(\n",
-" tf.abs(tfq.layers.State()(REFERENCE_CIRCUIT).to_tensor()) ** 2)\n",
+" tf.abs(tfq.layers.State()(REFERENCE_CIRCUIT).to_tensor())**2)\n",
 " nums = bits_to_ints(bits)\n",
-" return (2 ** N_QUBITS) * tf.reduce_mean(tf.gather(final_probs, nums)) - 1.0"
+" return (2**N_QUBITS) * tf.reduce_mean(tf.gather(final_probs, nums)) - 1.0"
 ]
 },
 {
@@ -334,6 +341,8 @@
 "outputs": [],
 "source": [
 "LATENT_DIM = 100\n",
+"\n",
+"\n",
 "def make_generator_model():\n",
 " \"\"\"Construct generator model.\"\"\"\n",
 " model = tf.keras.Sequential()\n",
@@ -345,6 +354,7 @@
 "\n",
 " return model\n",
 "\n",
+"\n",
 "def make_discriminator_model():\n",
 " \"\"\"Constrcut discriminator model.\"\"\"\n",
 " model = tf.keras.Sequential()\n",
@@ -387,17 +397,21 @@
 "outputs": [],
 "source": [
 "cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n",
+"\n",
+"\n",
 "def discriminator_loss(real_output, fake_output):\n",
 " \"\"\"Compute discriminator loss.\"\"\"\n",
 " real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n",
 " fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n",
 " total_loss = real_loss + fake_loss\n",
 " return total_loss\n",
 "\n",
+"\n",
 "def generator_loss(fake_output):\n",
 " \"\"\"Compute generator loss.\"\"\"\n",
 " return cross_entropy(tf.ones_like(fake_output), fake_output)\n",
 "\n",
+"\n",
 "generator_optimizer = tf.keras.optimizers.Adam(1e-4)\n",
 "discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)"
 ]
@@ -410,7 +424,8 @@
 },
 "outputs": [],
 "source": [
-"BATCH_SIZE=256\n",
+"BATCH_SIZE = 256\n",
+"\n",
 "\n",
 "@tf.function\n",
 "def train_step(images):\n",
@@ -425,8 +440,8 @@
 " gen_loss = generator_loss(fake_output)\n",
 " disc_loss = discriminator_loss(real_output, fake_output)\n",
 "\n",
-" gradients_of_generator = gen_tape.gradient(\n",
-" gen_loss, generator.trainable_variables)\n",
+" gradients_of_generator = gen_tape.gradient(gen_loss,\n",
+" generator.trainable_variables)\n",
 " gradients_of_discriminator = disc_tape.gradient(\n",
 " disc_loss, discriminator.trainable_variables)\n",
 "\n",
@@ -480,29 +495,37 @@
 "def train(dataset, epochs, start_epoch=1):\n",
 " \"\"\"Launch full training run for the given number of epochs.\"\"\"\n",
 " # Log original training distribution.\n",
-" tf.summary.histogram('Training Distribution', data=bits_to_ints(dataset), step=0)\n",
+" tf.summary.histogram('Training Distribution',\n",
+" data=bits_to_ints(dataset),\n",
+" step=0)\n",
 "\n",
-" batched_data = tf.data.Dataset.from_tensor_slices(dataset).shuffle(N_SAMPLES).batch(512)\n",
+" batched_data = tf.data.Dataset.from_tensor_slices(dataset).shuffle(\n",
+" N_SAMPLES).batch(512)\n",
 " t = time.time()\n",
 " for epoch in range(start_epoch, start_epoch + epochs):\n",
 " for i, image_batch in enumerate(batched_data):\n",
 " # Log batch-wise loss.\n",
 " gl, dl = train_step(image_batch)\n",
-" tf.summary.scalar(\n",
-" 'Generator loss', data=gl, step=epoch * len(batched_data) + i)\n",
-" tf.summary.scalar(\n",
-" 'Discriminator loss', data=dl, step=epoch * len(batched_data) + i)\n",
+" tf.summary.scalar('Generator loss',\n",
+" data=gl,\n",
+" step=epoch * len(batched_data) + i)\n",
+" tf.summary.scalar('Discriminator loss',\n",
+" data=dl,\n",
+" step=epoch * len(batched_data) + i)\n",
 "\n",
 " # Log full dataset XEB Fidelity and generated distribution.\n",
 " generated_samples = generator(tf.random.normal([N_SAMPLES, 100]))\n",
-" tf.summary.scalar(\n",
-" 'Generator XEB Fidelity Estimate', data=xeb_fid(generated_samples), step=epoch)\n",
-" tf.summary.histogram(\n",
-" 'Generator distribution', data=bits_to_ints(generated_samples), step=epoch)\n",
+" tf.summary.scalar('Generator XEB Fidelity Estimate',\n",
+" data=xeb_fid(generated_samples),\n",
+" step=epoch)\n",
+" tf.summary.histogram('Generator distribution',\n",
+" data=bits_to_ints(generated_samples),\n",
+" step=epoch)\n",
 " # Log new samples drawn from this particular random circuit.\n",
 " random_new_distribution = generate_data(REFERENCE_CIRCUIT, N_SAMPLES)\n",
-" tf.summary.histogram(\n",
-" 'New round of True samples', data=bits_to_ints(random_new_distribution), step=epoch)\n",
+" tf.summary.histogram('New round of True samples',\n",
+" data=bits_to_ints(random_new_distribution),\n",
+" step=epoch)\n",
 "\n",
 " if epoch % 10 == 0:\n",
 " print('Epoch {}, took {}(s)'.format(epoch, time.time() - t))\n",

0 commit comments
