#!/usr/bin/env python3

# program snippet for (micro) LLM training preparation
# read the text of a book
# extract the vocabulary; tokens == letters
# orthogonal (one-hot) token embedding

# Wilhelm Meisters Lehrjahre — Band 1 by Johann Wolfgang von Goethe
# https://www.gutenberg.org/ebooks/2335.txt.utf-8
# cleaned version stored at
# https://itp.uni-frankfurt.de/~gros/Vorlesungen/ML/Goethe_Meister.txt
# wc output (lines words bytes):  2383  21196 141215 Goethe_Meister.txt

import torch
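
# hedged sketch (not part of the original script): fetch the cleaned text
# from the URL given above when data.txt is not yet present locally;
# assumes data.txt is meant to be a local copy of Goethe_Meister.txt
import os
import urllib.request

if not os.path.exists("data.txt"):
  url = "https://itp.uni-frankfurt.de/~gros/Vorlesungen/ML/Goethe_Meister.txt"
  urllib.request.urlretrieve(url, "data.txt")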

with open("data.txt", encoding='utf-8-sig') as f_in:   # local copy of the training text
  trainingString = f_in.read()

# reducing vocabulary size:  upper --> lower
trainingString = trainingString.lower()          

trainingString = trainingString.replace("\n", " ")   # cleaning: newlines --> blanks
trainingString = trainingString.replace("  ", " ")   # collapse double blanks (single pass)
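# alternative sketch (assumption, not in the original): a regex collapses
# arbitrary whitespace runs in one pass instead of the single replace above
#import re
#trainingString = re.sub(r"\s+", " ", trainingString)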
#print(trainingString)
trainingText = list(trainingString)   # token sequence: one letter per token

vocabulary = sorted(set(trainingText))   # unique elements, sorted for a reproducible order
#vocabulary = ["aa", "bb", "cc"]      # for quick testing
dim = len(vocabulary)                 # equal to embedding dimension
print("# vocabulary dimension ", dim)
print(vocabulary)
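
# hedged addition (names char_to_index / index_to_char are assumptions, not
# in the original): explicit token <--> index maps, matching the one-hot
# positions constructed below
char_to_index = {letter: i for i, letter in enumerate(vocabulary)}
index_to_char = {i: letter for i, letter in enumerate(vocabulary)}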

# embedding dictionary:    token (letter) --> tensor
letterEmbedding = {letter: torch.zeros(dim) for letter in vocabulary}  

# orthogonal embedding tensors  (0, ..., 1, 0, ...)
for count, letter in enumerate(vocabulary):
  letterEmbedding[letter][count] = 1
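
# sketch of a check (addition, not in the original): stacked in vocabulary
# order, the one-hot vectors form exactly the identity matrix torch.eye(dim)
assert torch.equal(torch.stack([letterEmbedding[l] for l in vocabulary]),
                   torch.eye(dim))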

#print("# ", letterEmbedding)

#
# training data example
#

iStart   = 0           # start index for slicing
nContext = 10          # context length
inputLetters  = trainingText[iStart  :iStart+nContext]
targetLetters = trainingText[iStart+1:iStart+nContext+1]
print()
print(inputLetters)
print(targetLetters)
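
# hedged sketch (variable names inputTensor / targetTensor are assumptions,
# not in the original): stack the one-hot embeddings so the example can be
# consumed by a model as tensors of shape (nContext, dim)
inputTensor  = torch.stack([letterEmbedding[letter] for letter in inputLetters])
targetTensor = torch.stack([letterEmbedding[letter] for letter in targetLetters])
print(inputTensor.shape, targetTensor.shape)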
