Merge pull request #82 from tjake/qwen2
Add Qwen2 support and fix bug with small models using I8Q4
Showing 7 changed files with 225 additions and 9 deletions.
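To make the scope of the change concrete, here is a rough usage sketch built only from the constructors added in this diff. The config and weight-loading steps are placeholders: jlama's actual entry points for those are not shown here, so they appear only as comments.

import com.github.tjake.jlama.model.qwen2.Qwen2Model;
import com.github.tjake.jlama.model.qwen2.Qwen2Tokenizer;
import com.github.tjake.jlama.safetensors.DType;

import java.nio.file.Path;
import java.util.Optional;

public class Qwen2Sketch {
    public static void main(String[] args) {
        Path modelRoot = Path.of("models/qwen2"); // hypothetical local checkpoint directory

        // From this diff: the tokenizer is constructed directly from the model directory.
        Qwen2Tokenizer tokenizer = new Qwen2Tokenizer(modelRoot);

        // Assumptions, not shown in this diff: parse config.json into a Qwen2Config
        // and open the checkpoint's safetensors files as a WeightLoader.
        // Qwen2Config config = ...;
        // WeightLoader weights = ...;

        // From this diff: F32 working memory, I8 working quantization,
        // and no additional model-level quantization.
        // Qwen2Model model = new Qwen2Model(config, weights, tokenizer, DType.F32, DType.I8, Optional.empty());
    }
}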
jlama-core/src/main/java/com/github/tjake/jlama/model/qwen2/Qwen2Config.java (new file, +44)

package com.github.tjake.jlama.model.qwen2;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.github.tjake.jlama.math.ActivationFunction;
import com.github.tjake.jlama.safetensors.Config;

import java.util.List;

public class Qwen2Config extends Config {

    @JsonCreator
    public Qwen2Config(
            @JsonProperty("max_position_embeddings") int contextLength,
            @JsonProperty("hidden_size") int embeddingLength,
            @JsonProperty("intermediate_size") int hiddenLength,
            @JsonProperty("num_attention_heads") int numberOfHeads,
            @JsonProperty("num_key_value_heads") int numberOfKeyValueHeads,
            @JsonProperty("num_hidden_layers") int numberOfLayers,
            @JsonProperty("rms_norm_eps") float layerNormEps,
            @JsonProperty("vocab_size") int vocabularySize,
            @JsonProperty("bos_token_id") int bosToken,
            @JsonProperty("eos_token_id") int eosToken,
            @JsonProperty("hidden_act") ActivationFunction.Type activationFunction,
            @JsonProperty("rope_theta") Double ropeTheta) {
        super(
                contextLength,
                embeddingLength,
                hiddenLength,
                numberOfHeads,
                numberOfKeyValueHeads,
                numberOfLayers,
                layerNormEps,
                vocabularySize,
                bosToken,
                List.of(eosToken),
                activationFunction,
                ropeTheta,
                1.0,
                null,
                embeddingLength / numberOfHeads);
    }
}
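Since Qwen2Config is a plain Jackson-annotated class, it can be parsed straight from a HuggingFace-style config.json. A minimal sketch follows; the field values are illustrative (roughly Qwen1.5-0.5B sized), not copied from a real checkpoint, and depending on how ActivationFunction.Type names its constants the mapper may need case-insensitive enum handling as shown.

import com.fasterxml.jackson.databind.MapperFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.tjake.jlama.model.qwen2.Qwen2Config;

public class Qwen2ConfigExample {
    public static void main(String[] args) throws Exception {
        // Illustrative config.json containing only the fields the constructor declares.
        String json = """
                {
                  "max_position_embeddings": 32768,
                  "hidden_size": 1024,
                  "intermediate_size": 2816,
                  "num_attention_heads": 16,
                  "num_key_value_heads": 16,
                  "num_hidden_layers": 24,
                  "rms_norm_eps": 1e-6,
                  "vocab_size": 151936,
                  "bos_token_id": 151643,
                  "eos_token_id": 151645,
                  "hidden_act": "silu",
                  "rope_theta": 1000000.0
                }
                """;

        // Assumption: "silu" must map onto the enum constant case-insensitively.
        ObjectMapper mapper = new ObjectMapper()
                .enable(MapperFeature.ACCEPT_CASE_INSENSITIVE_ENUMS);
        Qwen2Config config = mapper.readValue(json, Qwen2Config.class);

        // The final super() argument above is the per-head size: hidden_size / num_attention_heads.
        System.out.println("head size = " + (1024 / 16)); // 64
    }
}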
jlama-core/src/main/java/com/github/tjake/jlama/model/qwen2/Qwen2Model.java (new file, +98)

package com.github.tjake.jlama.model.qwen2;

import com.github.tjake.jlama.model.*;
import com.github.tjake.jlama.model.llama.LlamaModel;
import com.github.tjake.jlama.safetensors.Config;
import com.github.tjake.jlama.safetensors.DType;
import com.github.tjake.jlama.safetensors.WeightLoader;
import com.github.tjake.jlama.safetensors.tokenizer.Tokenizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Optional;
import java.util.stream.IntStream;

public class Qwen2Model extends LlamaModel {

    private static final Logger logger = LoggerFactory.getLogger(Qwen2Model.class);

    public Qwen2Model(
            Config config,
            WeightLoader weights,
            Tokenizer tokenizer,
            DType workingDType,
            DType workingQType,
            Optional<DType> modelQType) {
        super(config, weights, tokenizer, workingDType, workingQType, modelQType);
    }

    public Qwen2Model(
            InferenceType inferenceType,
            Config config,
            WeightLoader weights,
            Tokenizer tokenizer,
            DType workingDType,
            DType workingQType,
            Optional<DType> modelQType) {
        super(inferenceType, config, weights, tokenizer, workingDType, workingQType, modelQType);
    }

    @Override
    protected TransformerBlock[] loadTransformerBlockWeights() {
        DType qType = modelQType.orElse(this.modelDType);
        if (qType != this.modelDType) {
            logger.info("Quantizing model with {} - Please hold...", qType);
        }

        TransformerBlock[] transformerBlocks = new TransformerBlock[c.dctx().numberOfLayers];

        IntStream.range(c.dctx().layerStart, c.dctx().layerEnd).parallel().forEach(i -> {
            int relativeLayer = i - c.dctx().layerStart; // FIXME: add a helper to the context

            String base = "model.layers." + i + ".";
            String prefix = base + "self_attn.";

            // Qwen2 attention uses bias terms on the q/k/v projections (unlike Llama).
            CausalSelfAttention attention = new CausalSelfAttention(
                    this,
                    relativeLayer,
                    Optional.of(weights.load(prefix + "q_proj.bias").quantize(qType)),
                    Optional.of(weights.load(prefix + "k_proj.bias").quantize(qType)),
                    Optional.of(weights.load(prefix + "v_proj.bias").quantize(qType)),
                    weights.load(prefix + "q_proj.weight", c.dctx(), true, false).quantize(qType),
                    weights.load(prefix + "k_proj.weight", c.dctx(), true, false).quantize(qType),
                    weights.load(prefix + "v_proj.weight", c.dctx(), true, false).quantize(qType),
                    Optional.empty(),
                    weights.load(prefix + "o_proj.weight", c.dctx(), false, true).quantize(qType));

            prefix = base + "mlp.";

            MLPBlock mlp = new MLPBlock(
                    this,
                    c.activationFunction,
                    weights.load(prefix + "gate_proj.weight", c.dctx(), true, false).quantize(qType), // w1
                    weights.load(prefix + "down_proj.weight", c.dctx(), false, true).quantize(qType), // w2
                    weights.load(prefix + "up_proj.weight", c.dctx(), true, false).quantize(qType)); // w3

            transformerBlocks[relativeLayer] = new TransformerBlock(
                    this,
                    relativeLayer,
                    new RMSNorm(this, weights.load(base + "input_layernorm.weight").quantize(qType)),
                    attention,
                    new RMSNorm(this, weights.load(base + "post_attention_layernorm.weight").quantize(qType)),
                    mlp);
        });

        return transformerBlocks;
    }

    @Override
    public ModelSupport.ModelType getModelType() {
        return ModelSupport.ModelType.QWEN2;
    }
}
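The loader above resolves weights by HuggingFace-style tensor names. As a quick reference, this hypothetical helper (not part of the diff) prints the safetensors keys that loadTransformerBlockWeights() requests for one layer:

public class Qwen2TensorKeys {
    public static void main(String[] args) {
        int layer = 0; // absolute layer index, as used in the loop above
        String base = "model.layers." + layer + ".";
        String[] keys = {
                base + "self_attn.q_proj.weight", base + "self_attn.q_proj.bias",
                base + "self_attn.k_proj.weight", base + "self_attn.k_proj.bias",
                base + "self_attn.v_proj.weight", base + "self_attn.v_proj.bias",
                base + "self_attn.o_proj.weight", // no bias on the output projection
                base + "mlp.gate_proj.weight",    // w1
                base + "mlp.down_proj.weight",    // w2
                base + "mlp.up_proj.weight",      // w3
                base + "input_layernorm.weight",
                base + "post_attention_layernorm.weight"
        };
        for (String k : keys) System.out.println(k);
    }
}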
jlama-core/src/main/java/com/github/tjake/jlama/model/qwen2/Qwen2Tokenizer.java (new file, +53)

package com.github.tjake.jlama.model.qwen2;

import com.github.tjake.jlama.safetensors.tokenizer.BPETokenizer;

import java.nio.file.Path;
import java.util.Optional;
import java.util.stream.Collectors;

public class Qwen2Tokenizer extends BPETokenizer {

    public Qwen2Tokenizer(Path modelRoot) {
        super(modelRoot);
    }

    @Override
    protected String preProcess(String sentence) {
        if (model.normalizer() != null) sentence = model.normalizer().normalize(sentence);

        // Legacy models without byte fallback represent raw bytes as remapped
        // printable code points; apply that mapping before BPE encoding.
        if (model.isLegacy() && !model.byteFallback) {
            sentence = sentence.codePoints()
                    .map(c -> alteredBytes.getOrDefault(c, c))
                    .mapToObj(Character::toString)
                    .collect(Collectors.joining());
        }

        return sentence;
    }

    @Override
    protected long encodeCharacterAsToken(byte c) {
        // Encode a raw byte as its unsigned value.
        return Byte.toUnsignedLong(c);
    }

    @Override
    protected Optional<Character> maybeDecodeTokenAsCharacter(long id) {
        // No token ids decode directly to single characters for this tokenizer.
        return Optional.empty();
    }

    @Override
    protected String postProcessToken(String decoded) {
        if (decoded == null) decoded = model.unkToken;

        // Reverse the byte remapping applied in preProcess.
        if (model.isLegacy() && !model.byteFallback) {
            decoded = decoded.codePoints()
                    .map(c -> alteredBytes.inverse().getOrDefault(c, c))
                    .mapToObj(Character::toString)
                    .collect(Collectors.joining());
        }

        return decoded;
    }
}
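The alteredBytes table used above is inherited from BPETokenizer and is not part of this diff; presumably it follows the GPT-2 byte-to-unicode convention, where every byte that is not already a printable character is shifted to a code point at or above 256 so that all 256 byte values have a visible form. A minimal, self-contained sketch of that construction, under that assumption:

import java.util.HashMap;
import java.util.Map;

// Sketch of the GPT-2 style byte-to-unicode table that legacy BPE tokenizers
// are built on (an assumption about BPETokenizer.alteredBytes, for illustration).
public class ByteToUnicode {
    public static Map<Integer, Integer> build() {
        Map<Integer, Integer> table = new HashMap<>();
        int shift = 0;
        for (int b = 0; b < 256; b++) {
            boolean printable = (b >= '!' && b <= '~')
                    || (b >= 0xA1 && b <= 0xAC)
                    || (b >= 0xAE && b <= 0xFF);
            if (!printable) {
                // Remap each non-printable byte, in order, to 256, 257, 258, ...
                table.put(b, 256 + shift++);
            }
        }
        return table;
    }

    public static void main(String[] args) {
        Map<Integer, Integer> table = build();
        // The space byte (0x20) maps to U+0120 'Ġ', the familiar BPE space marker.
        System.out.println((char) (int) table.get((int) ' '));
    }
}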