Upload README.md with huggingface_hub
parent b4019bcd5c, commit 3f6000f2fe
@@ -77,7 +77,8 @@ print(scores) # [0.00027803096387751553, 0.9948403768236574]
 ```python
 from FlagEmbedding import FlagLLMReranker
-reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_bf16=True) # Setting use_bf16 to True speeds up computation with a slight performance degradation
+reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
+# reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_bf16=True) # You can also set use_bf16=True to speed up computation with a slight performance degradation
 
 score = reranker.compute_score(['query', 'passage'])
 print(score)
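For context on the change above, here is a minimal sketch of the quick-start usage with the new `use_fp16` flag. The panda query/passage pair is illustrative, and it assumes `compute_score` accepts either a single `[query, passage]` pair or a list of pairs, as the surrounding README sections show.

```python
from FlagEmbedding import FlagLLMReranker

# use_fp16=True trades a little accuracy for faster inference (the flag this commit documents)
reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_fp16=True)

# A single pair returns one score
score = reranker.compute_score(['what is panda?', 'The giant panda is a bear species endemic to China.'])
print(score)

# A list of pairs returns one score per pair
scores = reranker.compute_score([
    ['what is panda?', 'hi'],
    ['what is panda?', 'The giant panda is a bear species endemic to China.'],
])
print(scores)
```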
@@ -90,7 +91,8 @@ print(scores)
 ```python
 from FlagEmbedding import LayerWiseFlagLLMReranker
-reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_bf16=True) # Setting use_bf16 to True speeds up computation with a slight performance degradation
+reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
+# reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_bf16=True) # You can also set use_bf16=True to speed up computation with a slight performance degradation
 
 score = reranker.compute_score(['query', 'passage'], cutoff_layers=[28]) # Adjusting 'cutoff_layers' to pick which layers are used for computing the score.
 print(score)
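Likewise for the layerwise reranker, a short sketch with the updated `use_fp16` flag. The example pair is illustrative, and the exact return shape of `compute_score` for a list of pairs is an assumption based on the rest of this README.

```python
from FlagEmbedding import LayerWiseFlagLLMReranker

reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_fp16=True)

# cutoff_layers selects which transformer layer the score is read from (here layer 28,
# as in the README snippet above); one score is expected per input pair
scores = reranker.compute_score(
    [['what is panda?', 'The giant panda is a bear species endemic to China.']],
    cutoff_layers=[28],
)
print(scores)
```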
@@ -230,7 +232,7 @@ def get_inputs(pairs, tokenizer, prompt=None, max_length=1024):
         return_tensors='pt',
     )
 
-tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True, torch_dtype=torch.bfloat16)
+tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True, torch_dtype=torch.bfloat16)
 model = model.to('cuda')
 model.eval()
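The hunk above only loads the tokenizer and model; for completeness, a hedged sketch of the scoring step that typically follows in this README. It assumes the `get_inputs` helper defined around line 230 and that the remote-code model's forward pass accepts `cutoff_layers` and returns per-layer logits; treat both as assumptions to verify against the full file.

```python
import torch

pairs = [['what is panda?', 'The giant panda is a bear species endemic to China.']]

with torch.no_grad():
    # get_inputs is the helper defined earlier in the README (tokenizes and pads the pairs)
    inputs = get_inputs(pairs, tokenizer).to(model.device)
    outputs = model(**inputs, return_dict=True, cutoff_layers=[28])
    # One logits tensor per requested cutoff layer; the last-token logit is used as the relevance score
    all_scores = [logits[:, -1].view(-1, ).float() for logits in outputs[0]]
    print(all_scores)
```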