# Full run script: projection-only tuning (uses explicit head variables)
import os
import json
import requests
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model
import sentencepiece as spm
from tqdm import tqdm

# ========== Configuration ==========
TOKENIZER_PATH = "bpe.model"
DATA_PATH = "dataset_shuffled.jsonl"
MODEL_PATH = "encoder_fit.weights.h5"
MAX_LEN = 384
EMBED_DIM = 512
LATENT_DIM = 512
BATCH_SIZE = 768
EPOCHS = 1
SHUFFLE_BUFFER = 200000
LEARNING_RATE = 5e-4
TEMPERATURE = 0.05
SEED = 42

np.random.seed(SEED)
tf.random.set_seed(SEED)
tf.get_logger().setLevel("ERROR")

# ========== TPU / distribution strategy ==========
try:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="local")
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
    ON_TPU = True
    print("✅ TPU initialized")
except Exception as e:
    strategy = tf.distribute.get_strategy()
    ON_TPU = False
    print("⚠️ No TPU available, running on GPU/CPU:", e)

from tensorflow.keras import mixed_precision
policy = mixed_precision.Policy("mixed_bfloat16" if ON_TPU else "float32")
mixed_precision.set_global_policy(policy)
print("Mixed precision policy:", policy)

# ========== Tokenizer ==========
sp = spm.SentencePieceProcessor()
sp.load(TOKENIZER_PATH)
pad_id = sp.piece_to_id("<pad>")
if pad_id == -1:
    pad_id = 0
vocab_size = sp.get_piece_size()
print("vocab_size:", vocab_size, "pad_id:", pad_id)

def encode_sentence_np(s: str, max_len=MAX_LEN):
    ids = sp.encode(s, out_type=int)[:max_len]
    if len(ids) < max_len:
        ids = ids + [pad_id] * (max_len - len(ids))
    return np.array(ids, dtype=np.int32)
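# --- Optional sanity check (minimal sketch) ---
# Illustrates what encode_sentence_np produces: a fixed-length int32 vector,
# truncated or right-padded with pad_id up to MAX_LEN. The sample string is
# only a placeholder; uncomment to run.
# _demo = encode_sentence_np("quick tokenizer check")
# print("demo ids shape:", _demo.shape, "| tail is pad:", bool(_demo[-1] == pad_id))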
# ========== Model definition (original architecture kept) ==========
class DynamicConv(layers.Layer):
    def __init__(self, d_model, k=7):
        super().__init__()
        assert k % 2 == 1
        self.k = k
        self.dense = layers.Dense(d_model, activation='silu')
        self.proj = layers.Dense(d_model)
        self.generator = layers.Dense(k, dtype='float32')

    def call(self, x):
        x_in = x
        x = tf.cast(x, tf.float32)
        B = tf.shape(x)[0]; L = tf.shape(x)[1]; D = tf.shape(x)[2]
        kernels = self.generator(self.dense(x))
        kernels = tf.nn.softmax(kernels, axis=-1)
        pad = (self.k - 1) // 2
        x_pad = tf.pad(x, [[0, 0], [pad, pad], [0, 0]])
        x_pad_4d = tf.expand_dims(x_pad, axis=1)
        patches = tf.image.extract_patches(images=x_pad_4d,
                                           sizes=[1, 1, self.k, 1],
                                           strides=[1, 1, 1, 1],
                                           rates=[1, 1, 1, 1],
                                           padding='VALID')
        patches = tf.reshape(patches, [B, L, self.k, D])
        out = tf.reduce_sum(patches * tf.expand_dims(kernels, -1), axis=2)
        out = self.proj(out)
        return tf.cast(out, x_in.dtype)

class EncoderBlock(layers.Layer):
    def __init__(self, embed_dim=EMBED_DIM, ff_dim=1152, num_conv_layers=2):
        super().__init__()
        self.fc1 = layers.Dense(ff_dim)
        self.fc2 = layers.Dense(embed_dim)
        self.blocks = [DynamicConv(d_model=embed_dim, k=7) for _ in range(num_conv_layers)]
        self.ln = layers.LayerNormalization(epsilon=1e-5)
        self.ln1 = layers.LayerNormalization(epsilon=1e-5)
        self.ln2 = layers.LayerNormalization(epsilon=1e-5)

    def call(self, x, training=None):
        x_norm = self.ln(x)
        out = x_norm
        for block in self.blocks:
            out = block(out)
        x = x_norm + self.ln1(out)
        v = out
        h = self.fc1(v)
        g, v_split = tf.split(h, 2, axis=-1)
        h = tf.nn.silu(g) * v_split
        h = self.fc2(h)
        x = x + self.ln2(h)
        return x

class L2NormLayer(layers.Layer):
    def __init__(self, axis=1, epsilon=1e-10):
        super().__init__()
        self.axis = axis
        self.epsilon = epsilon

    def call(self, inputs):
        return tf.math.l2_normalize(inputs, axis=self.axis, epsilon=self.epsilon)

class SentenceEncoder(Model):
    def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM,
                 max_len=MAX_LEN, pad_id=pad_id, dropout_rate=0.1):
        super().__init__()
        self.pad_id = pad_id
        self.embed = layers.Embedding(vocab_size, embed_dim)
        self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
        self.dropout = layers.Dropout(dropout_rate)
        self.blocks = [EncoderBlock() for _ in range(2)]
        self.attn_pool = layers.Dense(1)
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
        self.latent = layers.Dense(latent_dim)
        self.l2norm = L2NormLayer(axis=1)

    def call(self, x, training=None):
        positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
        x_embed = self.embed(x) + self.pos_embed(positions)
        x_embed = self.dropout(x_embed, training=training)
        mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)
        h = x_embed
        for block in self.blocks:
            h = block(h, training=training)
        h = self.ln_f(h)
        scores = self.attn_pool(h)
        scores = tf.cast(scores, tf.float32)
        scores = tf.where(mask[..., tf.newaxis] == 0, tf.constant(-1e9, tf.float32), scores)
        scores = tf.nn.softmax(scores, axis=1)
        pooled = tf.reduce_sum(h * scores, axis=1)
        latent = self.latent(pooled)
        latent = self.l2norm(latent)
        return tf.cast(latent, tf.float32)

# ========== Create/build the model, load weights, and collect head variables explicitly ==========
with strategy.scope():
    encoder = SentenceEncoder(vocab_size=vocab_size)

    # 1) build (required)
    encoder(np.zeros((1, MAX_LEN), dtype=np.int32))

    # 2) load weights if they exist
    if os.path.exists(MODEL_PATH):
        try:
            encoder.load_weights(MODEL_PATH)
            print("Loaded weights from", MODEL_PATH)
        except Exception as e:
            print("Warning: load_weights failed:", e)

    # 3) freeze everything first (simplest approach)
    encoder.trainable = False

    # 4) ensure head layers exist and set them trainable (layer-level)
    head_layers = []
    for name in ("attn_pool", "ln_f", "latent"):
        layer = getattr(encoder, name, None)
        if layer is None:
            print(f"Warning: encoder has no attribute '{name}'")
        else:
            layer.trainable = True
            head_layers.append(layer)

    # 5) call once more to ensure any lazy variable creation runs
    encoder(np.zeros((1, MAX_LEN), dtype=np.int32))

    # 6) collect trainable variables explicitly from head_layers
    trainable_vars = []
    for layer in head_layers:
        # layer.trainable_weights gives the variables of that layer which are trainable
        for v in layer.trainable_weights:
            trainable_vars.append(v)

    # safety: if still empty, dump info and raise
    if len(trainable_vars) == 0:
        print("ERROR: no head trainable vars found. Dumping all variables:")
        for v in encoder.variables:
            print(v.name, "shape", v.shape, "trainable:", v.trainable)
        raise RuntimeError("No trainable head variables found - aborting.")

    total_trainable = sum(int(np.prod(v.shape)) for v in trainable_vars)
    print("Collected head layers:", [l.name for l in head_layers])
    print("Trainable var count (head):", len(trainable_vars), "params:", total_trainable)

    # 7) the optimizer must be created inside the strategy scope
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)

# ========== tf.data parsing ==========
AUTOTUNE = tf.data.AUTOTUNE

def _py_encode_line(line):
    raw = line.numpy()
    if isinstance(raw, bytes):
        s = raw.decode("utf-8")
    else:
        s = str(raw)
    j = json.loads(s)
    q = encode_sentence_np(j.get("query", ""))
    d = encode_sentence_np(j.get("document", ""))
    n = encode_sentence_np(j.get("hard_negative", ""))
    return q, d, n

def parse_line(line):
    q, d, n = tf.py_function(_py_encode_line, [line], [tf.int32, tf.int32, tf.int32])
    q.set_shape([MAX_LEN]); d.set_shape([MAX_LEN]); n.set_shape([MAX_LEN])
    return q, d, n

ds = tf.data.TextLineDataset(DATA_PATH)
ds = ds.map(lambda x: tf.strings.strip(x), num_parallel_calls=AUTOTUNE)
ds = ds.filter(lambda x: tf.not_equal(x, ""))
ds = ds.map(parse_line, num_parallel_calls=AUTOTUNE)
ds = ds.shuffle(SHUFFLE_BUFFER, seed=SEED)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE, drop_remainder=True)
ds = ds.prefetch(AUTOTUNE)

# sample check
try:
    sample = next(iter(ds.take(1)))
    print("Sample batch shapes:", [t.shape for t in sample])
except Exception as e:
    print("Warning: sample extraction failed:", e)

# ========== loss function ==========
@tf.function
def compute_loss_and_logits(q_emb, p_emb, n_emb, temperature):
    docs = tf.concat([p_emb, n_emb], axis=0)            # (2B, D)
    logits = tf.matmul(q_emb, docs, transpose_b=True)   # (B, 2B)
    logits = logits / tf.cast(temperature, logits.dtype)
    labels = tf.range(tf.shape(q_emb)[0], dtype=tf.int32)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
    return tf.reduce_mean(loss), logits
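# --- Optional illustration (minimal sketch) ---
# Shows the in-batch-negatives layout of compute_loss_and_logits: for B queries,
# docs = [positives; hard negatives] has shape (2B, D) and logits has shape (B, 2B).
# The label for query i is column i (its own positive); every other column,
# including column B+i (its hard negative), acts as a negative.
# The random embeddings below are placeholders; uncomment to run.
# _B, _D = 4, LATENT_DIM
# _q = tf.math.l2_normalize(tf.random.normal([_B, _D]), axis=1)
# _p = tf.math.l2_normalize(tf.random.normal([_B, _D]), axis=1)
# _n = tf.math.l2_normalize(tf.random.normal([_B, _D]), axis=1)
# _loss, _logits = compute_loss_and_logits(_q, _p, _n, TEMPERATURE)
# print("logits shape:", _logits.shape, "| loss:", float(_loss))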
# ========== train step (explicit trainable_vars) ==========
@tf.function
def train_step(q_batch, p_batch, n_batch):
    def step_fn(q, p, n):
        with tf.GradientTape() as tape:
            q_emb = encoder(q, training=True)
            p_emb = encoder(p, training=True)
            n_emb = encoder(n, training=True)
            loss, _ = compute_loss_and_logits(q_emb, p_emb, n_emb, TEMPERATURE)
            reg_loss = tf.add_n(encoder.losses) if encoder.losses else 0.0
            total_loss = loss + reg_loss
        grads = tape.gradient(total_loss, trainable_vars)
        # replace None grads with zeros (safe)
        grads = [tf.zeros_like(v) if g is None else g for g, v in zip(grads, trainable_vars)]
        optimizer.apply_gradients(zip(grads, trainable_vars))
        return total_loss

    # NOTE: the dataset is not distributed, so strategy.run passes the same host batch to every replica.
    per_replica_loss = strategy.run(step_fn, args=(q_batch, p_batch, n_batch))
    return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=None)

# ========== training loop ==========
with open(DATA_PATH, "r", encoding="utf-8") as f:
    num_lines = sum(1 for _ in f)
steps_per_epoch = max(1, num_lines // BATCH_SIZE)
print("num_lines:", num_lines, "steps_per_epoch:", steps_per_epoch)

it = iter(ds)
global_step = 0
for epoch in range(EPOCHS):
    print(f"\nEpoch {epoch+1}/{EPOCHS}")
    pbar = tqdm(range(steps_per_epoch), desc="training", ncols=120)
    for step in pbar:
        batch = next(it)
        loss = train_step(batch[0], batch[1], batch[2])
        global_step += 1
        pbar.set_postfix({"loss": f"{float(loss.numpy()):.4f}"})

encoder.save_weights(MODEL_PATH)
print("Saved weights:", MODEL_PATH)
print("Training finished.")
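# --- Optional inference sketch (minimal sketch) ---
# After training, embeddings can be compared directly: the encoder L2-normalizes
# its output, so the dot product of two embeddings equals their cosine similarity.
# The helper and the two example strings below are placeholders; uncomment to run.
# def embed_texts(texts):
#     ids = np.stack([encode_sentence_np(t) for t in texts])
#     return encoder(ids, training=False).numpy()
#
# _emb = embed_texts(["example query", "example document"])
# print("cosine similarity:", float(np.dot(_emb[0], _emb[1])))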