# load_model.py
  1. # from pymilvus import model
  2. # import torch
  3. # from transformers import AutoTokenizer, AutoModelForSequenceClassification
  4. # device = "cuda:1" if torch.cuda.is_available() else "cpu"
  5. # # 使用sentence transformer方式加载模型
  6. # embedding_path = r"/work/models/multilingual-e5-large-instruct" # 线上路径
  7. # # embedding_path = r"G:/work/code/models/multilingual-e5-large-instruct/" # 本地路径
  8. # sentence_transformer_ef = model.dense.SentenceTransformerEmbeddingFunction(model_name=embedding_path,device=device)
  9. # # rerank模型
  10. # bce_rerank_model_path = r"/work/models/bce-reranker-base_v1" # 线上路径
  11. # # bce_rerank_model_path = r"G:/work/code/models/bce-reranker-base_v1" # 本地路径
  12. # bce_rerank_tokenizer = AutoTokenizer.from_pretrained(bce_rerank_model_path)
  13. # bce_rerank_base_model = AutoModelForSequenceClassification.from_pretrained(bce_rerank_model_path).to(device)
  14. from pymilvus import model
  15. import torch
  16. from transformers import AutoTokenizer, AutoModelForSequenceClassification
  17. device = "cuda:1" if torch.cuda.is_available() else "cpu"
  18. # 使用sentence transformer方式加载模型
  19. # embedding_path = r"/work/models/multilingual-e5-large-instruct" # 线上路径
  20. embedding_path = "/opt/models/multilingual-e5-large-instruct/" # jk线上路径
  21. # embedding_path = r"G:/work/code/models/multilingual-e5-large-instruct/" # 本地路径
  22. sentence_transformer_ef = model.dense.SentenceTransformerEmbeddingFunction(model_name=embedding_path,device=device)
  23. # rerank模型
  24. # bce_rerank_model_path = r"/work/models/bce-reranker-base_v1" # 线上路径
  25. bce_rerank_model_path = r"/opt/models/bce-reranker-base_v1" # jk线上路径
  26. # bce_rerank_model_path = r"G:/work/code/models/bce-reranker-base_v1" # 本地路径
  27. bce_rerank_tokenizer = AutoTokenizer.from_pretrained(bce_rerank_model_path)
  28. bce_rerank_base_model = AutoModelForSequenceClassification.from_pretrained(bce_rerank_model_path).to(device)