xxupiano 2021-09-26 21:04:08 +08:00
parent a13e659303
commit 7a95e3863c
6 changed files with 129 additions and 201 deletions

View File

@@ -0,0 +1,11 @@
# ??? is a mandatory value.
# You should be able to set it without open_dict,
# but reading it before it is set will throw an error.
# Populated at runtime.
cwd: ???
defaults:
- hydra/output: custom
- train
- predict
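A minimal sketch of how the ??? marker behaves, assuming OmegaConf 2.x (the config library Hydra builds on); the snippet below is illustrative and not part of this commit:

from omegaconf import OmegaConf
from omegaconf.errors import MissingMandatoryValue

cfg = OmegaConf.create({"cwd": "???"})      # same effect as the cwd: ??? entry above
try:
    print(cfg.cwd)                          # reading it before it is set raises an error
except MissingMandatoryValue:
    print("cwd is still missing")
cfg.cwd = "/tmp/run"                        # can be assigned at runtime without open_dict
print(cfg.cwd)                              # now resolves normally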

View File

@@ -0,0 +1,11 @@
hydra:
run:
# Output directory for normal runs
dir: logs/${now:%Y-%m-%d_%H-%M-%S}
sweep:
# Output directory for sweep runs
dir: logs/${now:%Y-%m-%d_%H-%M-%S}
# Output subdirectory for sweep runs.
subdir: ${hydra.job.num}_${hydra.job.id}
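A hedged sketch of how the resolved output directory can be inspected from inside a task function; HydraConfig is Hydra's runtime config API, and the timestamped path in the comment is only an example of what the ${now:...} resolver produces:

import os
import hydra
from hydra.core.hydra_config import HydraConfig

@hydra.main(config_path="conf", config_name="config")
def show_dirs(cfg):
    # Hydra has already changed into hydra.run.dir, e.g. logs/2021-09-26_21-04-08
    print("working dir:", os.getcwd())
    print("configured run dir:", HydraConfig.get().run.dir)

if __name__ == "__main__":
    show_dirs()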

View File

@@ -0,0 +1 @@
text: "秦始皇兵马俑位于陕西省西安市1961年被国务院公布为第一批全国重点文物保护单位是世界八大奇迹之一。"

View File

@@ -0,0 +1,25 @@
data_dir: "data/"
bert_model: "bert-base-chinese" # ["bert-base-chinese", "bert-base-cased"]
language: "cn" # ["cn", "en"]
task_name: "ner"
output_dir: "checkpoint"
max_seq_length: 128
do_train: True
do_eval: True
eval_on: "dev"
do_lower_case: True
train_batch_size: 32
eval_batch_size: 8
learning_rate: 5e-5
num_train_epochs: 1 # the number of training epochs
warmup_proportion: 0.1
weight_decay: 0.01
adam_epsilon: 1e-8
max_grad_norm: 1.0
no_cuda: False
local_rank: -1
seed: 42
gradient_accumulation_steps: 1
fp16: False
fp16_opt_level: "01"
loss_scale: 0.0
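As a usage sketch, any of the values above can be overridden on the Hydra command line when the training script is launched (the script name run.py is an assumption, not a file named in this commit):

python run.py num_train_epochs=3 learning_rate=3e-5 fp16=True fp16_opt_level=O2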

View File

@@ -16,6 +16,9 @@ import argparse
import nltk
nltk.data.path.insert(0,'./data/nltk_data')
import hydra
from hydra import utils
class BertNer(BertForTokenClassification):
@@ -53,7 +56,8 @@ class Ner:
def tokenize(self, text: str):
""" tokenize input"""
words = word_tokenize(text)
# words = word_tokenize(text)
words = list(text)
tokens = []
valid_positions = []
for i,word in enumerate(words):
@@ -113,7 +117,7 @@ class Ner:
logits.pop()
labels = [(self.label_map[label],confidence) for label,confidence in logits]
words = word_tokenize(text)
words = list(text)
assert len(labels) == len(words)
result = []
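Replacing word_tokenize with list(text) switches tokenization from word level to character level, which matches the character-based bert-base-chinese vocabulary; a small illustration (the commented values are what the two calls return):

from nltk.tokenize import word_tokenize
word_tokenize("Warsaw hosts the Chopin Festival")   # ['Warsaw', 'hosts', 'the', 'Chopin', 'Festival']
list("秦始皇兵马俑")                                # ['秦', '始', '皇', '兵', '马', '俑']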
@@ -146,15 +150,10 @@ class Ner:
return tag
if __name__ == "__main__":
model = Ner("out_ner/")
parser = argparse.ArgumentParser()
parser.add_argument("--text",
default="Irene, a master student in Zhejiang University, Hangzhou, is traveling in Warsaw for Chopin Music Festival.",
type=str,
help="The text to be NERed")
text = parser.parse_args().text
@hydra.main(config_path="conf", config_name='config')
def main(cfg):
model = Ner(utils.get_original_cwd()+'/'+"checkpoint/")
text = cfg.text
print("The text to be NERed:")
print(text)
@ -172,3 +171,9 @@ if __name__ == "__main__":
print('Organization')
elif k=='MISC':
print('Miscellaneous')
if __name__ == "__main__":
main()
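With this change the input sentence comes from the Hydra config (the text: key shown earlier) rather than argparse, and utils.get_original_cwd() is needed because Hydra switches the working directory into the timestamped run directory, so checkpoint/ has to be resolved relative to the launch directory. A hedged usage sketch, assuming the file is named predict.py:

python predict.py text="Irene, a master student in Zhejiang University, is traveling in Warsaw for Chopin Music Festival."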

View File

@@ -1,6 +1,5 @@
from __future__ import absolute_import, division, print_function
import argparse
import csv
import json
import logging
@@ -27,6 +26,9 @@ logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(messa
level = logging.INFO)
logger = logging.getLogger(__name__)
import hydra
from hydra import utils
class Ner(BertForTokenClassification):
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,valid_ids=None,attention_mask_label=None):
@@ -136,7 +138,7 @@ class DataProcessor(object):
class NerProcessor(DataProcessor):
"""Processor for the CoNLL-2003 data set."""
"""Processor for the dataset."""
def get_train_examples(self, data_dir):
"""See base class."""
@@ -233,17 +235,6 @@ def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer
assert len(valid) == max_seq_length
assert len(label_mask) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
# logger.info("label: %s (id = %d)" % (example.label, label_ids))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
@@ -253,150 +244,41 @@ def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer
label_mask=label_mask))
return features
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval or not.")
parser.add_argument("--eval_on",
default="dev",
help="Whether to run eval on the dev set or test set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
@hydra.main(config_path="conf", config_name='config')
def main(cfg):
processors = {"ner":NerProcessor}
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
if cfg.local_rank == -1 or cfg.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not cfg.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.cuda.set_device(cfg.local_rank)
device = torch.device("cuda", cfg.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
device, n_gpu, bool(cfg.local_rank != -1), cfg.fp16))
if args.gradient_accumulation_steps < 1:
if cfg.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
cfg.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
cfg.train_batch_size = cfg.train_batch_size // cfg.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
random.seed(cfg.seed)
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
if not args.do_train and not args.do_eval:
if not cfg.do_train and not cfg.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if os.path.exists(utils.get_original_cwd()+'/'+cfg.output_dir) and os.listdir(utils.get_original_cwd()+'/'+cfg.output_dir) and cfg.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(utils.get_original_cwd()+'/'+cfg.output_dir))
if not os.path.exists(utils.get_original_cwd()+'/'+cfg.output_dir):
os.makedirs(utils.get_original_cwd()+'/'+cfg.output_dir)
task_name = args.task_name.lower()
task_name = cfg.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
@@ -405,27 +287,27 @@ def main():
label_list = processor.get_labels()
num_labels = len(label_list) + 1
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
tokenizer = BertTokenizer.from_pretrained(cfg.bert_model, do_lower_case=cfg.do_lower_case)
train_examples = None
num_train_optimization_steps = 0
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
if cfg.do_train:
train_examples = processor.get_train_examples(utils.get_original_cwd()+'/'+cfg.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
len(train_examples) / cfg.train_batch_size / cfg.gradient_accumulation_steps) * cfg.num_train_epochs
if cfg.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
if args.local_rank not in [-1, 0]:
if cfg.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
# Prepare model
config = BertConfig.from_pretrained(args.bert_model, num_labels=num_labels, finetuning_task=args.task_name)
model = Ner.from_pretrained(args.bert_model,
config = BertConfig.from_pretrained(cfg.bert_model, num_labels=num_labels, finetuning_task=cfg.task_name)
model = Ner.from_pretrained(cfg.bert_model,
from_tf = False,
config = config)
if args.local_rank == 0:
if cfg.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(device)
@@ -433,39 +315,35 @@ def main():
param_optimizer = list(model.named_parameters())
no_decay = ['bias','LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': cfg.weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
warmup_steps = int(args.warmup_proportion * num_train_optimization_steps)
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
warmup_steps = int(cfg.warmup_proportion * num_train_optimization_steps)
optimizer = AdamW(optimizer_grouped_parameters, lr=cfg.learning_rate, eps=cfg.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=num_train_optimization_steps)
if args.fp16:
if cfg.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
model, optimizer = amp.initialize(model, optimizer, opt_level=cfg.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
if cfg.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[cfg.local_rank],
output_device=cfg.local_rank,
find_unused_parameters=True)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
label_map = {i : label for i, label in enumerate(label_list,1)}
if args.do_train:
if cfg.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
train_examples, label_list, cfg.max_seq_length, tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
@@ -473,14 +351,14 @@ def main():
all_valid_ids = torch.tensor([f.valid_ids for f in train_features], dtype=torch.long)
all_lmask_ids = torch.tensor([f.label_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,all_valid_ids,all_lmask_ids)
if args.local_rank == -1:
if cfg.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=cfg.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
for _ in trange(int(cfg.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
@@ -489,21 +367,21 @@ def main():
loss = model(input_ids, segment_ids, input_mask, label_ids,valid_ids,l_mask)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if cfg.gradient_accumulation_steps > 1:
loss = loss / cfg.gradient_accumulation_steps
if args.fp16:
if cfg.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), cfg.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.max_grad_norm)
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if (step + 1) % cfg.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
@@ -511,30 +389,27 @@ def main():
# Save a trained model and the associated configuration
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
model_to_save.save_pretrained(utils.get_original_cwd()+'/'+cfg.output_dir)
tokenizer.save_pretrained(utils.get_original_cwd()+'/'+cfg.output_dir)
label_map = {i : label for i, label in enumerate(label_list,1)}
model_config = {"bert_model":args.bert_model,"do_lower":args.do_lower_case,"max_seq_length":args.max_seq_length,"num_labels":len(label_list)+1,"label_map":label_map}
json.dump(model_config,open(os.path.join(args.output_dir,"model_config.json"),"w"))
model_config = {"bert_model":cfg.bert_model,"do_lower":cfg.do_lower_case,"max_seq_length":cfg.max_seq_length,"num_labels":len(label_list)+1,"label_map":label_map}
json.dump(model_config,open(os.path.join(utils.get_original_cwd()+'/'+cfg.output_dir,"model_config.json"),"w"))
# Load a trained model and config that you have fine-tuned
else:
# Load a trained model and vocabulary that you have fine-tuned
model = Ner.from_pretrained(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model = Ner.from_pretrained(utils.get_original_cwd()+'/'+cfg.output_dir)
tokenizer = BertTokenizer.from_pretrained(utils.get_original_cwd()+'/'+cfg.output_dir, do_lower_case=cfg.do_lower_case)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if args.eval_on == "dev":
eval_examples = processor.get_dev_examples(args.data_dir)
elif args.eval_on == "test":
eval_examples = processor.get_test_examples(args.data_dir)
if cfg.do_eval and (cfg.local_rank == -1 or torch.distributed.get_rank() == 0):
if cfg.eval_on == "dev":
eval_examples = processor.get_dev_examples(utils.get_original_cwd()+'/'+cfg.data_dir)
elif cfg.eval_on == "test":
eval_examples = processor.get_test_examples(utils.get_original_cwd()+'/'+cfg.data_dir)
else:
raise ValueError("eval on dev or test set only")
eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_features = convert_examples_to_features(eval_examples, label_list, cfg.max_seq_length, tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
@@ -544,7 +419,7 @@ def main():
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,all_valid_ids,all_lmask_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=cfg.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
@@ -583,7 +458,7 @@ def main():
report = classification_report(y_true, y_pred,digits=4)
logger.info("\n%s", report)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
output_eval_file = os.path.join(utils.get_original_cwd()+'/'+cfg.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
logger.info("\n%s", report)