
"""
This file contains the templating for model cards prior to the v3.0 release. It still exists to be used alongside
SentenceTransformer.old_fit for backwards compatibility, but will be removed in a future release.
"""

from __future__ import annotations

import logging

from .util import fullname


class ModelCardTemplate:
    __TAGS__ = ["sentence-transformers", "feature-extraction", "sentence-similarity"]
    __DEFAULT_VARS__ = {
        "{PIPELINE_TAG}": "sentence-similarity",
        "{MODEL_DESCRIPTION}": "<!--- Describe your model here -->",
        "{TRAINING_SECTION}": "",
        "{USAGE_TRANSFORMERS_SECTION}": "",
        "{EVALUATION}": "<!--- Describe how your model was evaluated -->",
        "{CITING}": "<!--- Describe where people can find more information -->",
    }

    __MODEL_CARD__ = """
---
library_name: sentence-transformers
pipeline_tag: {PIPELINE_TAG}
tags:
{TAGS}
{DATASETS}
---

# {MODEL_NAME}

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a {NUM_DIMENSIONS}-dimensional dense vector space and can be used for tasks like clustering or semantic search.

{MODEL_DESCRIPTION}

## Usage (Sentence-Transformers)

Using this model is easy once you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```

{USAGE_TRANSFORMERS_SECTION}

## Evaluation Results

{EVALUATION}

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

{TRAINING_SECTION}

## Full Model Architecture
```
{FULL_MODEL_STR}
```

## Citing & Authors

{CITING}

"""

    __TRAINING_SECTION__ = """
## Training
The model was trained with the parameters:

{LOSS_FUNCTIONS}

Parameters of the fit()-Method:
```
{FIT_PARAMETERS}
```
"""

    __USAGE_TRANSFORMERS__ = """

## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

{POOLING_FUNCTION}

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, {POOLING_MODE} pooling.
sentence_embeddings = {POOLING_FUNCTION_NAME}(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

"""

    @staticmethod
    def model_card_get_pooling_function(pooling_mode):
        if pooling_mode == "max":
            return (
                "max_pooling",
                """
# Max Pooling - Take the max value over time for every dimension.
def max_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    token_embeddings[input_mask_expanded == 0] = -1e9  # Set padding tokens to large negative value
    return torch.max(token_embeddings, 1)[0]
""",
            )
        elif pooling_mode == "mean":
            return (
                "mean_pooling",
                """
# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
""",
            )
        elif pooling_mode == "cls":
            return (
                "cls_pooling",
                """
def cls_pooling(model_output, attention_mask):
    return model_output[0][:,0]
""",
            )

    @staticmethod
    def get_train_objective_info(dataloader, loss):
        try:
            if hasattr(dataloader, "get_config_dict"):
                loader_params = dataloader.get_config_dict()
            else:
                loader_params = {}
                loader_params["batch_size"] = dataloader.batch_size if hasattr(dataloader, "batch_size") else "unknown"
                if hasattr(dataloader, "sampler"):
                    loader_params["sampler"] = fullname(dataloader.sampler)
                if hasattr(dataloader, "batch_sampler"):
                    loader_params["batch_sampler"] = fullname(dataloader.batch_sampler)

            dataloader_str = f"""**DataLoader**:\n\n`{fullname(dataloader)}` of length {len(dataloader)} with parameters:
```
{loader_params}
```"""

            loss_str = "**Loss**:\n\n`{}` {}".format(
                fullname(loss),
                """with parameters:
  ```
  {}
  ```""".format(loss.get_config_dict())
                if hasattr(loss, "get_config_dict")
                else "",
            )

            return [dataloader_str, loss_str]

        except Exception as e:
            logging.warning("Exception when creating get_train_objective_info: {}".format(str(e)))
            return ""