From 273b5f3add4be4f2a09f623e06f3f52c1ded28e9 Mon Sep 17 00:00:00 2001 From: groupuser Date: Thu, 8 May 2025 02:45:34 +0000 Subject: [PATCH] "completed experiment caost-test-2" --- .gitattributes | 39 + README.md | 536 + adapter_config.json | 44 + adapter_model.safetensors | 3 + checkpoint-450/README.md | 202 + checkpoint-450/adapter_config.json | 44 + checkpoint-450/adapter_model.safetensors | 3 + checkpoint-450/optimizer.pt | 3 + checkpoint-450/rng_state.pth | 3 + checkpoint-450/scheduler.pt | 3 + checkpoint-450/special_tokens_map.json | 27 + checkpoint-450/tokenizer.json | 3 + checkpoint-450/tokenizer_config.json | 51348 ++++++++++++++++ checkpoint-450/trainer_state.json | 33 + checkpoint-450/training_args.bin | 3 + cheetah-fine-tuning-config.json | 79 + ...ne-tuning-01jtptcd849pbnyct65k6d05x2-kzmmc | 3 + special_tokens_map.json | 27 + tokenizer.json | 3 + tokenizer_config.json | 51348 ++++++++++++++++ training_args.bin | 3 + 21 files changed, 103757 insertions(+) create mode 100644 .gitattributes create mode 100644 README.md create mode 100644 adapter_config.json create mode 100644 adapter_model.safetensors create mode 100644 checkpoint-450/README.md create mode 100644 checkpoint-450/adapter_config.json create mode 100644 checkpoint-450/adapter_model.safetensors create mode 100644 checkpoint-450/optimizer.pt create mode 100644 checkpoint-450/rng_state.pth create mode 100644 checkpoint-450/scheduler.pt create mode 100644 checkpoint-450/special_tokens_map.json create mode 100644 checkpoint-450/tokenizer.json create mode 100644 checkpoint-450/tokenizer_config.json create mode 100644 checkpoint-450/trainer_state.json create mode 100644 checkpoint-450/training_args.bin create mode 100644 cheetah-fine-tuning-config.json create mode 100644 runs/May08_02-15-44_fine-tuning-01jtptcd849pbnyct65k6d05x2-kzmmc/events.out.tfevents.1746670545.fine-tuning-01jtptcd849pbnyct65k6d05x2-kzmmc create mode 100644 special_tokens_map.json create mode 100644 tokenizer.json create mode 100644 tokenizer_config.json create mode 100644 training_args.bin diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..e6d43da --- /dev/null +++ b/.gitattributes @@ -0,0 +1,39 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm 
filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+model-00001-of-00002.safetensors filter=lfs diff=lfs merge=lfs -text
+model-00002-of-00002.safetensors filter=lfs diff=lfs merge=lfs -text
+tokenizer.model filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..f6ba870
--- /dev/null
+++ b/README.md
@@ -0,0 +1,536 @@
+---
+license: gemma
+library_name: transformers
+pipeline_tag: image-text-to-text
+extra_gated_heading: Access Gemma on Hugging Face
+extra_gated_prompt: To access Gemma on Hugging Face, you’re required to review and
+  agree to Google’s usage license. To do this, please ensure you’re logged in to Hugging
+  Face and click below. Requests are processed immediately.
+extra_gated_button_content: Acknowledge license
+base_model: google/gemma-3-4b-pt
+---
+
+# Gemma 3 model card
+
+**Model Page**: [Gemma](https://ai.google.dev/gemma/docs/core)
+
+**Resources and Technical Documentation**:
+
+* [Gemma 3 Technical Report][g3-tech-report]
+* [Responsible Generative AI Toolkit][rai-toolkit]
+* [Gemma on Kaggle][kaggle-gemma]
+* [Gemma on Vertex Model Garden][vertex-mg-gemma3]
+
+**Terms of Use**: [Terms][terms]
+
+**Authors**: Google DeepMind
+
+## Model Information
+
+Summary description and brief definition of inputs and outputs
+
+### Description
+
+Gemma is a family of lightweight, state-of-the-art open models from Google,
+built from the same research and technology used to create the Gemini models.
+Gemma 3 models are multimodal, handling text and image input and generating text
+output, with open weights for both pre-trained variants and instruction-tuned
+variants. Gemma 3 has a large, 128K context window, multilingual support in over
+140 languages, and is available in more sizes than previous versions. Gemma 3
+models are well-suited for a variety of text generation and image understanding
+tasks, including question answering, summarization, and reasoning. Their
+relatively small size makes it possible to deploy them in environments with
+limited resources such as laptops, desktops, or your own cloud infrastructure,
+democratizing access to state-of-the-art AI models and helping foster innovation
+for everyone.
+
+### Inputs and outputs
+
+- **Input:**
+  - Text string, such as a question, a prompt, or a document to be summarized
+  - Images, normalized to 896 x 896 resolution and encoded to 256 tokens
+    each
+  - Total input context of 128K tokens for the 4B, 12B, and 27B sizes, and
+    32K tokens for the 1B size
+
+- **Output:**
+  - Generated text in response to the input, such as an answer to a
+    question, analysis of image content, or a summary of a document
+  - Total output context of 8192 tokens
+
+### Usage
+
+Below are some code snippets to help you quickly get started running the model. First, install the Transformers library. Gemma 3 is supported starting from transformers 4.50.0.
+
+```sh
+$ pip install -U transformers
+```
+
+Then, copy the snippet from the section that is relevant for your use case.
+
+#### Running with the `pipeline` API
+
+You can initialize the model and processor for inference with `pipeline` as follows.
+
+```python
+from transformers import pipeline
+import torch
+
+pipe = pipeline(
+    "image-text-to-text",
+    model="google/gemma-3-4b-it",
+    device="cuda",
+    torch_dtype=torch.bfloat16
+)
+```
+
+With instruction-tuned models, you need to use chat templates to process your inputs first. Then, you can pass them to the pipeline.
+
+```python
+messages = [
+    {
+        "role": "system",
+        "content": [{"type": "text", "text": "You are a helpful assistant."}]
+    },
+    {
+        "role": "user",
+        "content": [
+            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
+            {"type": "text", "text": "What animal is on the candy?"}
+        ]
+    }
+]
+
+output = pipe(text=messages, max_new_tokens=200)
+print(output[0]["generated_text"][-1]["content"])
+# Okay, let's take a look!
+# Based on the image, the animal on the candy is a **turtle**.
+# You can see the shell shape and the head and legs.
+```
+
+#### Running the model on a single/multi GPU
+
+```python
+# pip install accelerate
+
+from transformers import AutoProcessor, Gemma3ForConditionalGeneration
+from PIL import Image
+import requests
+import torch
+
+model_id = "google/gemma-3-4b-it"
+
+model = Gemma3ForConditionalGeneration.from_pretrained(
+    model_id, device_map="auto"
+).eval()
+
+processor = AutoProcessor.from_pretrained(model_id)
+
+messages = [
+    {
+        "role": "system",
+        "content": [{"type": "text", "text": "You are a helpful assistant."}]
+    },
+    {
+        "role": "user",
+        "content": [
+            {"type": "image", "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"},
+            {"type": "text", "text": "Describe this image in detail."}
+        ]
+    }
+]
+
+inputs = processor.apply_chat_template(
+    messages, add_generation_prompt=True, tokenize=True,
+    return_dict=True, return_tensors="pt"
+).to(model.device, dtype=torch.bfloat16)
+
+input_len = inputs["input_ids"].shape[-1]
+
+with torch.inference_mode():
+    generation = model.generate(**inputs, max_new_tokens=100, do_sample=False)
+    generation = generation[0][input_len:]
+
+decoded = processor.decode(generation, skip_special_tokens=True)
+print(decoded)
+
+# **Overall Impression:** The image is a close-up shot of a vibrant garden scene,
+# focusing on a cluster of pink cosmos flowers and a busy bumblebee.
+# It has a slightly soft, natural feel, likely captured in daylight.
+```
+
+
+### Citation
+
+```none
+@article{gemma_2025,
+    title={Gemma 3},
+    url={https://goo.gle/Gemma3Report},
+    publisher={Kaggle},
+    author={Gemma Team},
+    year={2025}
+}
+```
+
+## Model Data
+
+Data used for model training and how the data was processed.
+
+### Training Dataset
+
+These models were trained on a dataset of text data that includes a wide variety
+of sources. The 27B model was trained with 14 trillion tokens, the 12B model was
+trained with 12 trillion tokens, the 4B model with 4 trillion tokens, and the 1B
+with 2 trillion tokens. Here are the key components:
+
+- Web Documents: A diverse collection of web text ensures the model is
+  exposed to a broad range of linguistic styles, topics, and vocabulary. The
+  training dataset includes content in over 140 languages.
+- Code: Exposing the model to code helps it to learn the syntax and
+  patterns of programming languages, which improves its ability to generate
+  code and understand code-related questions.
+- Mathematics: Training on mathematical text helps the model learn logical
+  reasoning and symbolic representation, and to address mathematical queries.
+- Images: A wide range of images enables the model to perform image
+  analysis and visual data extraction tasks.
+
+The combination of these diverse data sources is crucial for training a powerful
+multimodal model that can handle a wide variety of different tasks and data
+formats.
+
+### Data Preprocessing
+
+Here are the key data cleaning and filtering methods applied to the training
+data:
+
+- CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering
+  was applied at multiple stages in the data preparation process to ensure
+  the exclusion of harmful and illegal content.
+- Sensitive Data Filtering: As part of making Gemma pre-trained models
+  safe and reliable, automated techniques were used to filter out certain
+  personal information and other sensitive data from training sets.
+- Additional methods: Filtering based on content quality and safety in
+  line with [our policies][safety-policies].
+
+## Implementation Information
+
+Details about the model internals.
+
+### Hardware
+
+Gemma was trained using [Tensor Processing Unit (TPU)][tpu] hardware (TPUv4p,
+TPUv5p and TPUv5e). Training vision-language models (VLMs) requires significant
+computational power. TPUs, designed specifically for matrix operations common in
+machine learning, offer several advantages in this domain:
+
+- Performance: TPUs are specifically designed to handle the massive
+  computations involved in training VLMs. They can speed up training
+  considerably compared to CPUs.
+- Memory: TPUs often come with large amounts of high-bandwidth memory,
+  allowing for the handling of large models and batch sizes during training.
+  This can lead to better model quality.
+- Scalability: TPU Pods (large clusters of TPUs) provide a scalable
+  solution for handling the growing complexity of large foundation models.
+  You can distribute training across multiple TPU devices for faster and more
+  efficient processing.
+- Cost-effectiveness: In many scenarios, TPUs can provide a more
+  cost-effective solution for training large models compared to CPU-based
+  infrastructure, especially when considering the time and resources saved
+  due to faster training.
+- These advantages are aligned with
+  [Google's commitments to operate sustainably][sustainability].
+
+### Software
+
+Training was done using [JAX][jax] and [ML Pathways][ml-pathways].
+
+JAX allows researchers to take advantage of the latest generation of hardware,
+including TPUs, for faster and more efficient training of large models. ML
+Pathways is Google's latest effort to build artificially intelligent systems
+capable of generalizing across multiple tasks. This makes it especially suitable
+for foundation models, including large language models like these.
+
+Together, JAX and ML Pathways are used as described in the
+[paper about the Gemini family of models][gemini-2-paper]; *"the 'single
+controller' programming model of Jax and Pathways allows a single Python
+process to orchestrate the entire training run, dramatically simplifying the
+development workflow."*
+
+## Evaluation
+
+Model evaluation metrics and results.
+ +### Benchmark Results + +These models were evaluated against a large collection of different datasets and +metrics to cover different aspects of text generation: + +#### Reasoning and factuality + +| Benchmark | Metric | Gemma 3 PT 1B | Gemma 3 PT 4B | Gemma 3 PT 12B | Gemma 3 PT 27B | +| ------------------------------ |----------------|:--------------:|:-------------:|:--------------:|:--------------:| +| [HellaSwag][hellaswag] | 10-shot | 62.3 | 77.2 | 84.2 | 85.6 | +| [BoolQ][boolq] | 0-shot | 63.2 | 72.3 | 78.8 | 82.4 | +| [PIQA][piqa] | 0-shot | 73.8 | 79.6 | 81.8 | 83.3 | +| [SocialIQA][socialiqa] | 0-shot | 48.9 | 51.9 | 53.4 | 54.9 | +| [TriviaQA][triviaqa] | 5-shot | 39.8 | 65.8 | 78.2 | 85.5 | +| [Natural Questions][naturalq] | 5-shot | 9.48 | 20.0 | 31.4 | 36.1 | +| [ARC-c][arc] | 25-shot | 38.4 | 56.2 | 68.9 | 70.6 | +| [ARC-e][arc] | 0-shot | 73.0 | 82.4 | 88.3 | 89.0 | +| [WinoGrande][winogrande] | 5-shot | 58.2 | 64.7 | 74.3 | 78.8 | +| [BIG-Bench Hard][bbh] | few-shot | 28.4 | 50.9 | 72.6 | 77.7 | +| [DROP][drop] | 1-shot | 42.4 | 60.1 | 72.2 | 77.2 | + +[hellaswag]: https://arxiv.org/abs/1905.07830 +[boolq]: https://arxiv.org/abs/1905.10044 +[piqa]: https://arxiv.org/abs/1911.11641 +[socialiqa]: https://arxiv.org/abs/1904.09728 +[triviaqa]: https://arxiv.org/abs/1705.03551 +[naturalq]: https://github.com/google-research-datasets/natural-questions +[arc]: https://arxiv.org/abs/1911.01547 +[winogrande]: https://arxiv.org/abs/1907.10641 +[bbh]: https://paperswithcode.com/dataset/bbh +[drop]: https://arxiv.org/abs/1903.00161 + +#### STEM and code + +| Benchmark | Metric | Gemma 3 PT 4B | Gemma 3 PT 12B | Gemma 3 PT 27B | +| ------------------------------ |----------------|:-------------:|:--------------:|:--------------:| +| [MMLU][mmlu] | 5-shot | 59.6 | 74.5 | 78.6 | +| [MMLU][mmlu] (Pro COT) | 5-shot | 29.2 | 45.3 | 52.2 | +| [AGIEval][agieval] | 3-5-shot | 42.1 | 57.4 | 66.2 | +| [MATH][math] | 4-shot | 24.2 | 43.3 | 50.0 | +| [GSM8K][gsm8k] | 8-shot | 38.4 | 71.0 | 82.6 | +| [GPQA][gpqa] | 5-shot | 15.0 | 25.4 | 24.3 | +| [MBPP][mbpp] | 3-shot | 46.0 | 60.4 | 65.6 | +| [HumanEval][humaneval] | 0-shot | 36.0 | 45.7 | 48.8 | + +[mmlu]: https://arxiv.org/abs/2009.03300 +[agieval]: https://arxiv.org/abs/2304.06364 +[math]: https://arxiv.org/abs/2103.03874 +[gsm8k]: https://arxiv.org/abs/2110.14168 +[gpqa]: https://arxiv.org/abs/2311.12022 +[mbpp]: https://arxiv.org/abs/2108.07732 +[humaneval]: https://arxiv.org/abs/2107.03374 + +#### Multilingual + +| Benchmark | Gemma 3 PT 1B | Gemma 3 PT 4B | Gemma 3 PT 12B | Gemma 3 PT 27B | +| ------------------------------------ |:-------------:|:-------------:|:--------------:|:--------------:| +| [MGSM][mgsm] | 2.04 | 34.7 | 64.3 | 74.3 | +| [Global-MMLU-Lite][global-mmlu-lite] | 24.9 | 57.0 | 69.4 | 75.7 | +| [WMT24++][wmt24pp] (ChrF) | 36.7 | 48.4 | 53.9 | 55.7 | +| [FloRes][flores] | 29.5 | 39.2 | 46.0 | 48.8 | +| [XQuAD][xquad] (all) | 43.9 | 68.0 | 74.5 | 76.8 | +| [ECLeKTic][eclektic] | 4.69 | 11.0 | 17.2 | 24.4 | +| [IndicGenBench][indicgenbench] | 41.4 | 57.2 | 61.7 | 63.4 | + +[mgsm]: https://arxiv.org/abs/2210.03057 +[flores]: https://arxiv.org/abs/2106.03193 +[xquad]: https://arxiv.org/abs/1910.11856v3 +[global-mmlu-lite]: https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite +[wmt24pp]: https://arxiv.org/abs/2502.12404v1 +[eclektic]: https://arxiv.org/abs/2502.21228 +[indicgenbench]: https://arxiv.org/abs/2404.16816 + +#### Multimodal + +| Benchmark | Gemma 3 PT 4B | Gemma 3 PT 12B | Gemma 3 PT 27B | +| 
------------------------------ |:-------------:|:--------------:|:--------------:|
+| [COCOcap][coco-cap]            | 102           | 111            | 116            |
+| [DocVQA][docvqa] (val)         | 72.8          | 82.3           | 85.6           |
+| [InfoVQA][info-vqa] (val)      | 44.1          | 54.8           | 59.4           |
+| [MMMU][mmmu] (pt)              | 39.2          | 50.3           | 56.1           |
+| [TextVQA][textvqa] (val)       | 58.9          | 66.5           | 68.6           |
+| [RealWorldQA][realworldqa]     | 45.5          | 52.2           | 53.9           |
+| [ReMI][remi]                   | 27.3          | 38.5           | 44.8           |
+| [AI2D][ai2d]                   | 63.2          | 75.2           | 79.0           |
+| [ChartQA][chartqa]             | 63.6          | 74.7           | 76.3           |
+| [VQAv2][vqav2]                 | 63.9          | 71.2           | 72.9           |
+| [BLINK][blinkvqa]              | 38.0          | 35.9           | 39.6           |
+| [OKVQA][okvqa]                 | 51.0          | 58.7           | 60.2           |
+| [TallyQA][tallyqa]             | 42.5          | 51.8           | 54.3           |
+| [SpatialSense VQA][ss-vqa]     | 50.9          | 60.0           | 59.4           |
+| [CountBenchQA][countbenchqa]   | 26.1          | 17.8           | 68.0           |
+
+[coco-cap]: https://cocodataset.org/#home
+[docvqa]: https://www.docvqa.org/
+[info-vqa]: https://arxiv.org/abs/2104.12756
+[mmmu]: https://arxiv.org/abs/2311.16502
+[textvqa]: https://textvqa.org/
+[realworldqa]: https://paperswithcode.com/dataset/realworldqa
+[remi]: https://arxiv.org/html/2406.09175v1
+[ai2d]: https://allenai.org/data/diagrams
+[chartqa]: https://arxiv.org/abs/2203.10244
+[vqav2]: https://visualqa.org/index.html
+[blinkvqa]: https://arxiv.org/abs/2404.12390
+[okvqa]: https://okvqa.allenai.org/
+[tallyqa]: https://arxiv.org/abs/1810.12440
+[ss-vqa]: https://arxiv.org/abs/1908.02660
+[countbenchqa]: https://github.com/google-research/big_vision/blob/main/big_vision/datasets/countbenchqa/
+
+## Ethics and Safety
+
+Ethics and safety evaluation approach and results.
+
+### Evaluation Approach
+
+Our evaluation methods include structured evaluations and internal red-teaming
+testing of relevant content policies. Red-teaming was conducted by a number of
+different teams, each with different goals and human evaluation metrics. These
+models were evaluated against a number of different categories relevant to
+ethics and safety, including:
+
+- **Child Safety**: Evaluation of text-to-text and image-to-text prompts
+  covering child safety policies, including child sexual abuse and
+  exploitation.
+- **Content Safety:** Evaluation of text-to-text and image-to-text prompts
+  covering safety policies including harassment, violence and gore, and hate
+  speech.
+- **Representational Harms**: Evaluation of text-to-text and image-to-text
+  prompts covering safety policies including bias, stereotyping, and harmful
+  associations or inaccuracies.
+
+In addition to development-level evaluations, we conduct "assurance
+evaluations", which are our 'arms-length' internal evaluations for responsibility
+governance decision making. They are conducted separately from the model
+development team, to inform decision making about release. High-level findings
+are fed back to the model team, but prompt sets are held out to prevent
+overfitting and preserve the results' ability to inform decision making.
+Assurance evaluation results are reported to our Responsibility & Safety Council
+as part of release review.
+
+### Evaluation Results
+
+For all areas of safety testing, we saw major improvements in the categories of
+child safety, content safety, and representational harms relative to previous
+Gemma models. All testing was conducted without safety filters to evaluate the
+model capabilities and behaviors.
For both text-to-text and image-to-text, and
+across all model sizes, the model produced minimal policy violations, and showed
+significant improvements over previous Gemma models' performance with respect
+to ungrounded inferences. A limitation of our evaluations was that they included
+only English-language prompts.
+
+## Usage and Limitations
+
+These models have certain limitations that users should be aware of.
+
+### Intended Usage
+
+Open vision-language models (VLMs) have a wide range of applications
+across various industries and domains. The following list of potential uses is
+not comprehensive. The purpose of this list is to provide contextual information
+about the possible use cases that the model creators considered as part of model
+training and development.
+
+- Content Creation and Communication
+  - Text Generation: These models can be used to generate creative text
+    formats such as poems, scripts, code, marketing copy, and email drafts.
+  - Chatbots and Conversational AI: Power conversational interfaces
+    for customer service, virtual assistants, or interactive applications.
+  - Text Summarization: Generate concise summaries of a text corpus,
+    research papers, or reports.
+  - Image Data Extraction: These models can be used to extract,
+    interpret, and summarize visual data for text communications.
+- Research and Education
+  - Natural Language Processing (NLP) and VLM Research: These
+    models can serve as a foundation for researchers to experiment with VLM
+    and NLP techniques, develop algorithms, and contribute to the
+    advancement of the field.
+  - Language Learning Tools: Support interactive language learning
+    experiences, aiding in grammar correction or providing writing practice.
+  - Knowledge Exploration: Assist researchers in exploring large
+    bodies of text by generating summaries or answering questions about
+    specific topics.
+
+### Limitations
+
+- Training Data
+  - The quality and diversity of the training data significantly
+    influence the model's capabilities. Biases or gaps in the training data
+    can lead to limitations in the model's responses.
+  - The scope of the training dataset determines the subject areas
+    the model can handle effectively.
+- Context and Task Complexity
+  - Models are better at tasks that can be framed with clear
+    prompts and instructions. Open-ended or highly complex tasks might be
+    challenging.
+  - A model's performance can be influenced by the amount of context
+    provided (longer context generally leads to better outputs, up to a
+    certain point).
+- Language Ambiguity and Nuance
+  - Natural language is inherently complex. Models might struggle
+    to grasp subtle nuances, sarcasm, or figurative language.
+- Factual Accuracy
+  - Models generate responses based on information they learned
+    from their training datasets, but they are not knowledge bases. They
+    may generate incorrect or outdated factual statements.
+- Common Sense
+  - Models rely on statistical patterns in language. They might
+    lack the ability to apply common sense reasoning in certain situations.
+
+### Ethical Considerations and Risks
+
+The development of vision-language models (VLMs) raises several ethical
+concerns. In creating an open model, we have carefully considered the following:
+
+- Bias and Fairness
+  - VLMs trained on large-scale, real-world text and image data can
+    reflect socio-cultural biases embedded in the training material.
These models underwent careful scrutiny; input data pre-processing is
+    described and posterior evaluations are reported in this card.
+- Misinformation and Misuse
+  - VLMs can be misused to generate text that is false, misleading,
+    or harmful.
+  - Guidelines for responsible use are provided with the model; see the
+    [Responsible Generative AI Toolkit][rai-toolkit].
+- Transparency and Accountability:
+  - This model card summarizes details on the models' architecture,
+    capabilities, limitations, and evaluation processes.
+  - A responsibly developed open model offers the opportunity to
+    share innovation by making VLM technology accessible to developers and
+    researchers across the AI ecosystem.
+
+Risks identified and mitigations:
+
+- **Perpetuation of biases**: Continuous monitoring (using evaluation
+  metrics and human review) and the exploration of de-biasing techniques
+  during model training, fine-tuning, and other use cases are encouraged.
+- **Generation of harmful content**: Mechanisms and guidelines for content
+  safety are essential. Developers are encouraged to exercise caution and
+  implement appropriate content safety safeguards based on their specific
+  product policies and application use cases.
+- **Misuse for malicious purposes**: Technical limitations and developer
+  and end-user education can help mitigate malicious applications of
+  VLMs. Educational resources and reporting mechanisms for users to flag
+  misuse are provided. Prohibited uses of Gemma models are outlined in the
+  [Gemma Prohibited Use Policy][prohibited-use].
+- **Privacy violations**: Models were trained on data filtered for removal
+  of certain personal information and other sensitive data. Developers are
+  encouraged to adhere to privacy regulations with privacy-preserving
+  techniques.
+
+### Benefits
+
+At the time of release, this family of models provides open vision-language
+model implementations designed from the ground up for responsible AI
+development and offering high performance relative to similarly sized models.
+
+Using the benchmark evaluation metrics described in this document, these models
+have been shown to provide superior performance to other, comparably sized open
+model alternatives.
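+
+This repository also ships a LoRA adapter fine-tuned from this base model
+(see `adapter_config.json` in this repository: `r=16`, `lora_alpha=32`,
+`lora_dropout=0.05`, with `lm_head` among the target modules). The snippet
+below is a minimal sketch, not part of the original model card, of how such
+an adapter could be attached with PEFT; `"path/to/adapter"` is a hypothetical
+placeholder for wherever this repository's `adapter_config.json` and
+`adapter_model.safetensors` live.
+
+```python
+# Minimal sketch (assumes peft is installed and a transformers version with
+# Gemma 3 support).
+from transformers import AutoProcessor, Gemma3ForConditionalGeneration
+from peft import PeftModel
+import torch
+
+base_id = "google/gemma-3-4b-it"
+
+# adapter_config.json records a local base-model path from the training run,
+# so build the base model explicitly from the Hub id instead.
+base_model = Gemma3ForConditionalGeneration.from_pretrained(
+    base_id, device_map="auto", torch_dtype=torch.bfloat16
+)
+processor = AutoProcessor.from_pretrained(base_id)
+
+# "path/to/adapter" is a placeholder for this repository's checkout or Hub id.
+model = PeftModel.from_pretrained(base_model, "path/to/adapter")
+
+# Optionally fold the LoRA deltas into the base weights for faster inference;
+# this also merges the adapter's update to lm_head.
+model = model.merge_and_unload()
+```
+
+After merging, the model can be used exactly like the base model in the
+snippets above.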
+
+[g3-tech-report]: https://goo.gle/Gemma3Report
+[rai-toolkit]: https://ai.google.dev/responsible
+[kaggle-gemma]: https://www.kaggle.com/models/google/gemma-3
+[vertex-mg-gemma3]: https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/gemma3
+[terms]: https://ai.google.dev/gemma/terms
+[safety-policies]: https://ai.google/static/documents/ai-responsibility-update-published-february-2025.pdf
+[prohibited-use]: https://ai.google.dev/gemma/prohibited_use_policy
+[tpu]: https://cloud.google.com/tpu/docs/intro-to-tpu
+[sustainability]: https://sustainability.google/operating-sustainably/
+[jax]: https://github.com/jax-ml/jax
+[ml-pathways]: https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/
+[gemini-2-paper]: https://arxiv.org/abs/2312.11805
\ No newline at end of file
diff --git a/adapter_config.json b/adapter_config.json
new file mode 100644
index 0000000..ce5b0ef
--- /dev/null
+++ b/adapter_config.json
@@ -0,0 +1,44 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": {
+    "base_model_class": "Gemma3ForConditionalGeneration",
+    "parent_library": "transformers.models.gemma3.modeling_gemma3"
+  },
+  "base_model_name_or_path": "/cheetah/input/model/groupuser/gemma-3-4b-it",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {
+    "loftq_bits": 4,
+    "loftq_iter": 1
+  },
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "lm_head",
+    "down_proj",
+    "q_proj",
+    "out_proj",
+    "v_proj",
+    "fc2",
+    "o_proj",
+    "fc1",
+    "up_proj",
+    "gate_proj",
+    "k_proj"
+  ],
+  "task_type": null,
+  "use_dora": false,
+  "use_rslora": false
+}
\ No newline at end of file
diff --git a/adapter_model.safetensors b/adapter_model.safetensors
new file mode 100644
index 0000000..af11a97
--- /dev/null
+++ b/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:418fb0eafc47ae7a26bd7ac09061387feb541f35e96442419188207b712c4470
+size 2856072312
diff --git a/checkpoint-450/README.md b/checkpoint-450/README.md
new file mode 100644
index 0000000..81359d2
--- /dev/null
+++ b/checkpoint-450/README.md
@@ -0,0 +1,202 @@
+---
+base_model: /cheetah/input/model/groupuser/gemma-3-4b-it
+library_name: peft
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and
downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. + +## How to Get Started with the Model + +Use the code below to get started with the model. + +[More Information Needed] + +## Training Details + +### Training Data + + + +[More Information Needed] + +### Training Procedure + + + +#### Preprocessing [optional] + +[More Information Needed] + + +#### Training Hyperparameters + +- **Training regime:** [More Information Needed] + +#### Speeds, Sizes, Times [optional] + + + +[More Information Needed] + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +[More Information Needed] + +#### Factors + + + +[More Information Needed] + +#### Metrics + + + +[More Information Needed] + +### Results + +[More Information Needed] + +#### Summary + + + +## Model Examination [optional] + + + +[More Information Needed] + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** [More Information Needed] +- **Hours used:** [More Information Needed] +- **Cloud Provider:** [More Information Needed] +- **Compute Region:** [More Information Needed] +- **Carbon Emitted:** [More Information Needed] + +## Technical Specifications [optional] + +### Model Architecture and Objective + +[More Information Needed] + +### Compute Infrastructure + +[More Information Needed] + +#### Hardware + +[More Information Needed] + +#### Software + +[More Information Needed] + +## Citation [optional] + + + +**BibTeX:** + +[More Information Needed] + +**APA:** + +[More Information Needed] + +## Glossary [optional] + + + +[More Information Needed] + +## More Information [optional] + +[More Information Needed] + +## Model Card Authors [optional] + +[More Information Needed] + +## Model Card Contact + +[More Information Needed] +### Framework versions + +- PEFT 0.13.2 \ No newline at end of file diff --git a/checkpoint-450/adapter_config.json b/checkpoint-450/adapter_config.json new file mode 100644 index 0000000..ce5b0ef --- /dev/null +++ b/checkpoint-450/adapter_config.json @@ -0,0 +1,44 @@ +{ + "alpha_pattern": {}, + "auto_mapping": { + "base_model_class": "Gemma3ForConditionalGeneration", + "parent_library": "transformers.models.gemma3.modeling_gemma3" + }, + "base_model_name_or_path": "/cheetah/input/model/groupuser/gemma-3-4b-it", + "bias": "none", + "fan_in_fan_out": false, + "inference_mode": true, + "init_lora_weights": true, + "layer_replication": null, + "layers_pattern": null, + "layers_to_transform": null, + "loftq_config": { + "loftq_bits": 4, + "loftq_iter": 1 + }, + "lora_alpha": 32, + "lora_dropout": 0.05, + "megatron_config": null, + "megatron_core": "megatron.core", + "modules_to_save": null, + "peft_type": "LORA", + "r": 16, + "rank_pattern": {}, + "revision": null, + "target_modules": [ + "lm_head", + "down_proj", + "q_proj", + "out_proj", + "v_proj", + "fc2", + "o_proj", + "fc1", + "up_proj", + "gate_proj", + "k_proj" + ], + "task_type": null, + "use_dora": false, + "use_rslora": false +} \ No newline at end of file diff --git a/checkpoint-450/adapter_model.safetensors b/checkpoint-450/adapter_model.safetensors new file mode 100644 index 0000000..af11a97 --- /dev/null +++ b/checkpoint-450/adapter_model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:418fb0eafc47ae7a26bd7ac09061387feb541f35e96442419188207b712c4470
+size 2856072312
diff --git a/checkpoint-450/optimizer.pt b/checkpoint-450/optimizer.pt
new file mode 100644
index 0000000..ff87de2
--- /dev/null
+++ b/checkpoint-450/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d107a46af05a2f1f01cad46340c0e76e1563913df5fe1185a76e2ce4296d85a3
+size 272713836
diff --git a/checkpoint-450/rng_state.pth b/checkpoint-450/rng_state.pth
new file mode 100644
index 0000000..3526ba2
--- /dev/null
+++ b/checkpoint-450/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a267e6acf13d2d52433946e7b13535d6bd754fe6ec097fd519015462487480e
+size 14244
diff --git a/checkpoint-450/scheduler.pt b/checkpoint-450/scheduler.pt
new file mode 100644
index 0000000..6eb767a
--- /dev/null
+++ b/checkpoint-450/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b31cd93e368082a902459d288095335bdb79afa66effae6f02b2fe431b00650f
+size 1064
diff --git a/checkpoint-450/special_tokens_map.json b/checkpoint-450/special_tokens_map.json
new file mode 100644
index 0000000..b063dec
--- /dev/null
+++ b/checkpoint-450/special_tokens_map.json
@@ -0,0 +1,27 @@
+{
+  "boi_token": "<start_of_image>",
+  "bos_token": {
+    "content": "<bos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eoi_token": "<end_of_image>",
+  "eos_token": {
+    "content": "<eos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "image_token": "<image_soft_token>",
+  "pad_token": "<pad>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/checkpoint-450/tokenizer.json b/checkpoint-450/tokenizer.json
new file mode 100644
index 0000000..3f42d95
--- /dev/null
+++ b/checkpoint-450/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f00cda2038b6d03c7afadf45c4eb54f1f50d05b40a9e9ecc6aaac13df20157f3
+size 33384834
diff --git a/checkpoint-450/tokenizer_config.json b/checkpoint-450/tokenizer_config.json
new file mode 100644
index 0000000..0823568
--- /dev/null
+++ b/checkpoint-450/tokenizer_config.json
@@ -0,0 +1,51348 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<eos>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "<bos>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "5": {
+      "content": "[multimodal]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "6": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "7": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "8": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "9": {
+      "content":
"", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "10": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "11": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "12": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "13": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "14": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "15": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "16": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "17": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "18": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "19": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "20": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "21": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "22": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "23": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "24": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "25": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "26": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "27": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "28": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "29": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "30": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "31": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "32": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "33": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "34": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "35": { + "content": "", + "lstrip": 
false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "36": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "37": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "38": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "39": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "40": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "41": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "42": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "43": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "44": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "45": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "46": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "47": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "48": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "49": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "50": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "51": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "52": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "53": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "54": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "55": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "56": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "57": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "58": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "59": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "60": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "61": { + "content": "", + "lstrip": false, + 
"normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "62": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "63": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "64": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "65": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "66": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "67": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "68": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "69": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "70": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "71": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "72": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "73": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "74": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "75": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "76": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "77": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "78": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "79": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "80": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "81": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "82": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "83": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "84": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "85": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "86": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "87": { + "content": "", + "lstrip": false, + "normalized": 
false, + "rstrip": false, + "single_word": false, + "special": false + }, + "88": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "89": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "90": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "91": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "92": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "93": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "94": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "95": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "96": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "97": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "98": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "99": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "100": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "101": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "102": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "103": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "104": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "105": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "106": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "107": { + "content": "\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "108": { + "content": "\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "109": { + "content": "\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "110": { + "content": "\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "111": { + "content": "\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "112": { + "content": "\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "113": { + "content": 
"\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "114": { + "content": "\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "115": { + "content": "\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "116": { + "content": "\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "117": { + "content": "\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "118": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "119": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "120": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "121": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "122": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "123": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "124": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "125": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "126": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "127": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "128": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "129": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "130": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "131": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "132": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "133": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + 
"special": false + }, + "134": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "135": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "136": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "137": { + "content": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "138": { + "content": "▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "139": { + "content": "▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "140": { + "content": "▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "141": { + "content": "▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "142": { + "content": "▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "143": { + "content": "▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "144": { + "content": "▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "145": { + "content": "▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "146": { + "content": "▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "147": { + "content": "▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "148": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "149": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "150": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "151": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "152": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "153": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "154": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "155": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "156": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + 
"rstrip": false, + "single_word": false, + "special": false + }, + "157": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "158": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "159": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "160": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "161": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "162": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "163": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "164": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "165": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "166": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "167": { + "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "168": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "169": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "171": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "172": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "173": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "174": { + "content": "
", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "170": { + "content": "
", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "175": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "176": { + "content": "
", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "177": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "178": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "179": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "180": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "181": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "182": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "183": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "184": { + "content": "

", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "185": { + "content": "

", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "186": { + "content": "

", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "187": { + "content": "

", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "188": { + "content": "

", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "189": { + "content": "
", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "190": { + "content": "
", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "191": { + "content": "
", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "192": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "193": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "194": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "195": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "196": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "197": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "198": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "199": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "200": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "201": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "202": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "203": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "204": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "205": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "206": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "207": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "208": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "209": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "210": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "211": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "212": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "213": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "214": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "215": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "216": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "217": { + 
"content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "218": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "219": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "220": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "221": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": false + }, + "222": { + "content": "