| row_id (int64, 0–37.6k) | type (string, 13 classes) | text (string, 1–5.72M chars, nullable) |
|---|---|---|
| 0 | tab |
from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom jax import NamedSharding\nfrom flax.training.train_state import TrainState\nfrom flax.training import orbax_utils\nfrom orbax.checkpoint import PyTreeCheckpointer\n\nfrom models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\n\nclass Genie(nn.Module):\n """Genie model"""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n [HIGH_ENTROPY]: int\n [HIGH_ENTROPY]: int\n # --- LAM ---\n lam_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n # --- Dynamics ---\n dyna_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n latent_dim=self.latent_patch_dim,\n [HIGH_ENTROPY].num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.[HIGH_ENTROPY],\n num_heads=self.[HIGH_ENTROPY],\n dropout=0.0,\n [HIGH_ENTROPY].0,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n latent_dim=self.latent_patch_dim,\n [HIGH_ENTROPY].num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n [HIGH_ENTROPY].0,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n [HIGH_ENTROPY].num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n [HIGH_ENTROPY] = self.tokenizer.vq_encode(batch["videos"], training=False)\n lam_outputs = self.lam.vq_encode(batch["videos"], training=False)\n outputs = dict(\n [HIGH_ENTROPY].lax.[HIGH_ENTROPY]["indices"]),\n [HIGH_ENTROPY].lax.[HIGH_ENTROPY]["z_q"]),\n )\n outputs["mask_rng"] = batch["mask_rng"]\n dyna_outputs = self.[HIGH_ENTROPY], training)\n outputs.[HIGH_ENTROPY])\n mle_indices = jnp.argmax(outputs["token_logits"], axis=-1)\n outputs["recon"] = self.tokenizer.decode(\n mle_indices, batch["videos"].shape[2:4]\n )\n return outputs\n\n @nn.compact\n def sample(\n self,\n batch: Dict[str, Any],\n steps: int = 25,\n temperature: int = 1,\n sample_argmax: bool = False,\n ) -> Any:\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch["videos"], training=False)\n token_idxs = tokenizer_out["indices"]\n new_frame_idxs = jnp.[HIGH_ENTROPY])[:, 0]\n action_tokens = self.lam.vq.get_codes(batch["latent_actions"])\n\n # --- Initialize MaskGIT ---\n init_mask = jnp.[HIGH_ENTROPY], dtype=bool)[:, 0]\n init_carry = (\n batch["rng"],\n new_frame_idxs,\n init_mask,\n token_idxs,\n action_tokens,\n )\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast="params",\n split_rngs={"params": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n\n # --- Run MaskGIT loop ---\n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n [HIGH_ENTROPY],\n steps=steps,\n )\n final_carry, _ = [HIGH_ENTROPY], jnp.arange(steps))\n new_frame_idxs = final_carry[1]\n new_frame_pixels = self.tokenizer.decode(\n jnp.[HIGH_ENTROPY], 1),\n video_hw=batch["videos"].shape[2:4],\n )\n return new_frame_pixels\n\n def vq_encode(self, batch, 
training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch["videos"], training=training)\n return lam_output["indices"]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, [HIGH_ENTROPY], mask, token_idxs, action_tokens = carry\n step = x\n B, T, N = token_idxs.shape[:3]\n\n # --- Construct + encode video ---\n vid_token_idxs = jnp.concatenate(\n (token_idxs, jnp.[HIGH_ENTROPY], 1)), axis=1\n )\n vid_embed = self.dynamics.[HIGH_ENTROPY])\n curr_masked_frame = jnp.where(\n jnp.[HIGH_ENTROPY], -1),\n self.dynamics.mask_token[0],\n vid_embed[:, -1],\n )\n vid_embed = vid_embed.at[:, -1].[HIGH_ENTROPY])\n\n # --- Predict transition ---\n act_embed = self.dynamics.[HIGH_ENTROPY])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.[HIGH_ENTROPY])[:, -1] / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n [HIGH_ENTROPY] = jnp.[HIGH_ENTROPY], axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n [HIGH_ENTROPY] = jnp.where(\n step == self.steps - 1,\n jnp.[HIGH_ENTROPY], axis=-1),\n jax.random.[HIGH_ENTROPY], final_logits),\n )\n gather_fn = jax.vmap(jax.vmap(lambda x, y: x[y]))\n [HIGH_ENTROPY] = gather_fn(jax.nn.[HIGH_ENTROPY]), [HIGH_ENTROPY])\n [HIGH_ENTROPY] += ~mask\n # Update masked tokens only\n new_token_idxs = jnp.where(mask, [HIGH_ENTROPY], [HIGH_ENTROPY])\n\n # --- Update mask ---\n [HIGH_ENTROPY] = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.[HIGH_ENTROPY].shape[-1]) > [HIGH_ENTROPY]\n sorted_idxs = jnp.[HIGH_ENTROPY], axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = [HIGH_ENTROPY], sorted_idxs)\n\n new_carry = (rng, new_token_idxs, new_mask, token_idxs, action_tokens)\n return new_carry, None\n\n\ndef [HIGH_ENTROPY]: TrainState, sharding: NamedSharding, inputs: Dict[str, jax.Array], rng: jax.Array, args):\n """Restore pre-trained Genie components"""\n rng, _rng = jax.random.split(rng)\n \n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n latent_dim=args.latent_patch_dim,\n [HIGH_ENTROPY].num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.[HIGH_ENTROPY],\n num_heads=args.[HIGH_ENTROPY],\n dropout=args.dropout,\n [HIGH_ENTROPY].dropout,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n latent_dim=args.latent_patch_dim,\n [HIGH_ENTROPY].num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n [HIGH_ENTROPY].dropout,\n )\n [HIGH_ENTROPY] = dummy_tokenizer.init(_rng, inputs)\n lam_init_params = dummy_lam.init(_rng, inputs)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.[HIGH_ENTROPY].[HIGH_ENTROPY].max_lr), b1=0.9, b2=0.9, [HIGH_ENTROPY])\n\n [HIGH_ENTROPY] = TrainState.[HIGH_ENTROPY].apply, [HIGH_ENTROPY], tx=dummy_tx)\n dummy_lam_train_state = TrainState.[HIGH_ENTROPY].apply, [HIGH_ENTROPY], tx=dummy_tx)\n\n def [HIGH_ENTROPY], sharding_spec):\n """Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding."""\n def [HIGH_ENTROPY]):\n if [HIGH_ENTROPY], 'shape') 
and [HIGH_ENTROPY], 'dtype'):\n return jax.ShapeDtypeStruct(leaf_template.shape, leaf_template.dtype, [HIGH_ENTROPY])\n return leaf_template\n return jax.tree_util.tree_map(map_fn, pytree_template)\n\n [HIGH_ENTROPY] = create_abstract_sharded_pytree(\n [HIGH_ENTROPY], sharding\n )\n abstract_sharded_lam_state = create_abstract_sharded_pytree(\n dummy_lam_train_state, sharding\n )\n\n [HIGH_ENTROPY] = {"model": [HIGH_ENTROPY]}\n lam_restore_target = {"model": abstract_sharded_lam_state}\n\n [HIGH_ENTROPY] = orbax_utils.[HIGH_ENTROPY])\n lam_restore_args = orbax_utils.[HIGH_ENTROPY])\n\n [HIGH_ENTROPY] = PyTreeCheckpointer().restore(args.[HIGH_ENTROPY], [HIGH_ENTROPY], [HIGH_ENTROPY])["model"].params["params"]\n restored_lam_params = PyTreeCheckpointer().restore(args.lam_checkpoint, [HIGH_ENTROPY], [HIGH_ENTROPY])["model"].params["params"]\n # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is loaded into HBM and immediately dicarded.\n # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n restored_lam_params = {k: v for k, v in restored_lam_params.items() if k in train_state.params["params"]["lam"]}\n \n train_state.params["params"]["tokenizer"].update(\n [HIGH_ENTROPY]\n )\n train_state.params["params"]["lam"].update(\n restored_lam_params\n )\n\n return train_state
|
| 1 | tab | 9:39:16 AM [info] Activating crowd-code\n9:39:16 AM [info] Recording started\n |
| 2 | terminal_command | /usr/bin/python3 [USER_PATH]/.vscode/extensions/ms-python.python-2025.6.1-darwin-arm64/python_files/printEnvVariablesToFile.py [USER_PATH]/.vscode/extensions/ms-python.python-2025.6.1-darwin-arm64/python_files/deactivate/zsh/envVars.txt |
| 3 | terminal_output | ]633;C[1m[7m%[27m[1m[0m \r \r |
| 4 | tab | null |
| 5 | selection_mouse | null |
| 6 | tab | *.pyc\n*.npy\n*.png\n*.gif\n\nwandb_key\ncheckpoints/\nwandb/\n__pycache__/\n |
| 7 | selection_command | null |
| 8 | selection_command | null |
| 9 | selection_command | null |
| 10 | content | \n |
| 11 | content | * |
| 12 | selection_keyboard | null |
| 13 | content | . |
| 14 | selection_keyboard | null |
| 15 | content | c |
| 16 | selection_keyboard | null |
| 17 | content | s |
| 18 | selection_keyboard | null |
| 19 | content | v |
| 20 | selection_keyboard | null |
| 21 | content | \n |
| 22 | content | * |
| 23 | selection_keyboard | null |
| 24 | content | . |
| 25 | selection_keyboard | null |
| 26 | content | j |
| 27 | selection_keyboard | null |
| 28 | content | s |
| 29 | selection_keyboard | null |
| 30 | content | o |
| 31 | selection_keyboard | null |
| 32 | content | n |
| 33 | selection_keyboard | null |
| 34 | content | \n |
| 35 | content | * |
| 36 | selection_keyboard | null |
| 37 | content | . |
| 38 | selection_keyboard | null |
| 39 | content | s |
| 40 | selection_keyboard | null |
| 41 | content | r |
| 42 | selection_keyboard | null |
| 43 | content | t |
| 44 | selection_keyboard | null |
| 45 | selection_command | null |
| 46 | tab |
from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_resolution: int = 64\n data_dir: str = "data_tfrecords/coinrun"\n checkpoint: str = ""\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = ""\n project: str = ""\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = ""\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef [HIGH_ENTROPY], state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={"dropout": inputs["rng"]}\n )\n mse = jnp.square(inputs["videos"] - outputs["recon"]).mean()\n q_loss = jnp.square(jax.lax.[HIGH_ENTROPY]["emb"]) - outputs["z"]).mean()\n commitment_loss = jnp.square(\n outputs["emb"] - jax.lax.[HIGH_ENTROPY]["z"])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs["videos"].clip(0, 1).reshape(-1, *inputs["videos"].shape[2:])\n recon = outputs["recon"].clip(0, 1).reshape(-1, *outputs["recon"].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs["indices"]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n [HIGH_ENTROPY],\n psnr=psnr,\n ssim=ssim,\n [HIGH_ENTROPY],\n )\n return loss, (outputs["recon"], metrics)\n\n\[EMAIL]\ndef [HIGH_ENTROPY], inputs):\n grad_fn = jax.[HIGH_ENTROPY], has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.[HIGH_ENTROPY])\n if args.log_gradients:\n metrics["encoder_gradients_std/"] = jax.tree.map(\n lambda x: x.std(), grads["params"]["encoder"]\n )\n metrics["vq_gradients_std/"] = jax.tree.map(\n lambda x: x.std(), grads["params"]["vq"]\n )\n metrics["decoder_gradients_std/"] = jax.tree.map(\n lambda x: x.std(), grads["params"]["decoder"]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == "__main__":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError("No JAX devices found.")\n print(f"Running on {num_devices} devices.")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f"Global batch size {args.batch_size} must be divisible by "\n f"number of devices {num_devices}."\n )\n \n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n 
wandb.[HIGH_ENTROPY].entity, project=args.project, group="debug", config=args)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n [HIGH_ENTROPY].num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n [HIGH_ENTROPY].codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_resolution, args.image_resolution, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape), dtype=jnp.float32\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.[HIGH_ENTROPY], b1=0.9, b2=0.9, [HIGH_ENTROPY])\n train_state = TrainState.[HIGH_ENTROPY].apply, [HIGH_ENTROPY], tx=tx)\n \n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = [HIGH_ENTROPY],))\n mesh = Mesh(devices=device_mesh_arr, axis_names=('data',))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.[HIGH_ENTROPY], replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {"model": train_state}\n restore_args = orbax_utils.[HIGH_ENTROPY])\n train_state.params["params"].update(\n PyTreeCheckpointer().restore(args.checkpoint, [HIGH_ENTROPY], restore_args=restore_args)["model"].params["params"]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split("_")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith(".tfrecord")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files, args.seq_len, args.batch_size, *image_shape\n )\n print(f"Starting training from step {step}...")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n \n videos_sharding = NamedSharding(mesh, PartitionSpec('data', None, None, None, None))\n videos = jax.[HIGH_ENTROPY], videos)\n \n inputs = [HIGH_ENTROPY], rng=_rng)\n train_state, loss, recon, metrics = [HIGH_ENTROPY], inputs)\n print(f"Step {step}, loss: {loss}")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({"loss": loss, "step": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs["videos"][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.[HIGH_ENTROPY], recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, "t h w c -> h (t w) c"\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.[HIGH_ENTROPY][0])),\n [HIGH_ENTROPY].Image(\n np.[HIGH_ENTROPY].astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {"model": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.[HIGH_ENTROPY])\n orbax_checkpointer.save(\n os.path.join(\n os.getcwd(), args.ckpt_dir, f"tokenizer_{ts}_{step}"\n ),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n
|
| 47 | selection_command | null |
| 48 | selection_command | null |
| 49 | selection_command | null |
| 50 | selection_command | null |
| 51 | selection_command | null |
| 52 | selection_command | null |
| 53 | selection_command | null |
| 54 | selection_command | null |
| 55 | selection_command | null |
| 56 | selection_command | null |
| 57 | selection_command | null |
| 58 | selection_command | null |
| 59 | selection_command | null |
| 60 | selection_command | null |
| 61 | selection_command | null |
| 62 | selection_command | null |
| 63 | selection_command | null |
| 64 | selection_command | null |
| 65 | selection_command | null |
| 66 | selection_command | null |
| 67 | selection_command | null |
| 68 | selection_command | null |
| 69 | selection_command | null |
| 70 | selection_command | null |
| 71 | selection_command | null |
| 72 | selection_command | null |
| 73 | selection_command | null |
| 74 | selection_command | null |
| 75 | selection_command | null |
| 76 | selection_command | null |
| 77 | selection_command | null |
| 78 | selection_command | null |
| 79 | selection_command | null |
| 80 | selection_command | null |
| 81 | selection_command | null |
| 82 | selection_command | null |
| 83 | selection_command | null |
| 84 | selection_command | null |
| 85 | selection_command | null |
| 86 | selection_command | null |
| 87 | selection_command | null |
| 88 | selection_command | null |
| 89 | selection_command | null |
| 90 | selection_command | null |
| 91 | selection_command | null |
| 92 | selection_command | null |
| 93 | selection_command | null |
| 94 | selection_command | null |
| 95 | selection_command | null |
| 96 | selection_command | null |
| 97 | selection_command | null |
| 98 | selection_command | null |
| 99 | selection_command | null |
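Rows 10–45 of the preview above show how character-level edits are stored: each `content` event carries the text inserted into the active buffer, while the interleaved `selection_keyboard` events record the cursor movement caused by typing. The sketch below illustrates how a typed string can be recovered by concatenating `content` events in row order; the events are hard-coded from the preview, and the dict layout mirroring the row_id/type/text columns is an assumption for illustration, not an official API.

```python
# Minimal sketch: replaying "content" events reconstructs the text typed into
# the .gitignore buffer shown in the preview. The dict layout mirrors the
# row_id/type/text columns above and is an assumption, not the official schema.
preview_events = [
    {"row_id": 10, "type": "content", "text": "\n"},
    {"row_id": 11, "type": "content", "text": "*"},
    {"row_id": 12, "type": "selection_keyboard", "text": None},
    {"row_id": 13, "type": "content", "text": "."},
    {"row_id": 15, "type": "content", "text": "c"},
    {"row_id": 17, "type": "content", "text": "s"},
    {"row_id": 19, "type": "content", "text": "v"},
    # ... remaining rows 20-45 omitted for brevity ...
]

typed = "".join(e["text"] for e in preview_events if e["type"] == "content")
print(repr(typed))  # '\n*.csv' for the events listed here
```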
Install crowd-code 2.0 to help crowd-source the next-generation coding dataset.
crowd-code-dataset-1.0 is an anonymized dataset of fine-grained IDE interactions, crowd-sourced from 25 contributors over the last 6 months using crowd-code 1.0, a VS Code/Cursor extension that captures large parts of the software engineering workflow. The dataset records real research engineering workflows: character-level edits, navigation, terminal use, and iterative debugging.
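For a quick look at the full event stream rather than the preview, the row_id/type/text schema shown above can be explored with the Hugging Face `datasets` library. This is a minimal sketch; the repository id and split name are assumptions and may need to be adjusted.

```python
from collections import Counter

from datasets import load_dataset

# Hypothetical repository id and split; adjust to the actual dataset location.
ds = load_dataset("crowd-code/crowd-code-dataset-1.0", split="train")

# Tally the 13 event types (e.g. "tab", "content", "terminal_command",
# "selection_keyboard") across the whole recording.
type_counts = Counter(row["type"] for row in ds)
print(type_counts.most_common())

# Pull out the recorded terminal commands, skipping null text fields.
commands = [row["text"] for row in ds if row["type"] == "terminal_command" and row["text"]]
print(commands[:5])
```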